Python keras.layers.core.Flatten() Examples

The following are 22 code examples of keras.layers.core.Flatten(), collected from open-source projects; each example names its original project, source file, and license. Flatten() collapses every dimension of its input except the batch axis, and it is the standard bridge between convolutional feature maps and fully connected (Dense) layers. Several of the older examples use the legacy Keras 1 API, e.g. Convolution2D(32, 8, 8, subsample=(4, 4)) and Model(input=..., output=...), which in Keras 2 become Conv2D(32, (8, 8), strides=(4, 4)) and Model(inputs=..., outputs=...).
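As a quick, self-contained illustration of what Flatten() does, here is a minimal sketch using the Keras 2 API (the layer sizes are chosen arbitrarily for illustration):

from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

# Flatten() collapses the 4D conv output (batch, 26, 26, 8)
# into a 2D tensor (batch, 26*26*8 = 5408) that Dense can consume.
model = Sequential()
model.add(Conv2D(8, (3, 3), input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()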
Example #1
Source File: train_and_save.py    From MNIST-cnn with MIT License
def cnn(trn_set, tst_set):
    trn_x, trn_y = trn_set
    trn_y = np.squeeze(trn_y, axis=2)
    tst_x, tst_y = tst_set
    tst_y = np.squeeze(tst_y, axis=2)

    model = Sequential()

    model.add(Convolution2D(2, 5, 5, activation='sigmoid', input_shape=(1, 28, 28)))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1))
    return model, trn_x, trn_y, tst_x, tst_y
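cnn() returns the compiled model together with the squeezed training and test arrays, leaving fitting to the caller. A hedged usage sketch (the batch size and epoch count are illustrative; nb_epoch is the Keras 1 spelling this repo uses):

model, trn_x, trn_y, tst_x, tst_y = cnn(trn_set, tst_set)
model.fit(trn_x, trn_y, batch_size=32, nb_epoch=5,
          validation_data=(tst_x, tst_y))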

Example #2
Source File: networks.py    From C51-DDQN-Keras with MIT License
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
        """Model Value Distribution

        With States as inputs and output Probability Distributions for all Actions
        """

        state_input = Input(shape=input_shape)
        cnn_feature = Convolution2D(32, 8, 8, subsample=(4,4), activation='relu')(state_input)
        cnn_feature = Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')(cnn_feature)
        cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
        cnn_feature = Flatten()(cnn_feature)
        cnn_feature = Dense(512, activation='relu')(cnn_feature)

        distribution_list = []
        for i in range(action_size):
            distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

        model = Model(input=state_input, output=distribution_list)

        adam = Adam(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',optimizer=adam)

        return model 
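This example is written against the legacy Keras 1 API (Convolution2D(32, 8, 8, subsample=(4, 4)), Model(input=..., output=...)). For reference, a hedged sketch of the same network in Keras 2 syntax follows; the behavior is intended to be identical, but this port is not from the original repo:

from keras.layers import Input, Conv2D, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam

def value_distribution_network_v2(input_shape, num_atoms, action_size, learning_rate):
    state_input = Input(shape=input_shape)
    x = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')(state_input)
    x = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)

    # One softmax head per action, each a distribution over num_atoms support points
    distribution_list = [Dense(num_atoms, activation='softmax')(x)
                         for _ in range(action_size)]

    model = Model(inputs=state_input, outputs=distribution_list)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learning_rate))
    return model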
Example #3
Source File: shallownet.py    From DL4CVStarterBundle with GNU General Public License v3.0
def build(width, height, depth, classes):
        # Initialize the model along with the input shape to be 'channels_last'
        model = Sequential()
        input_shape = (height, width, depth)

        # Update the image shape if 'channels_first' is being used
        if K.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)

        # Define the first (and only) CONV => RELU layer
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
        model.add(Activation('relu'))

        # Add a softmax classifier
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation('softmax'))

        # Return the network architecture
        return model 
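A hedged usage sketch for build() above (the class name ShallowNet and the CIFAR-10-sized arguments are assumptions, not taken from this excerpt):

# build() is a static method on the network class defined in shallownet.py.
model = ShallowNet.build(width=32, height=32, depth=3, classes=10)
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.summary()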
Example #4
Source File: test_tasks.py    From CAPTCHA-breaking with MIT License
def test_img_clf(self):
        print('image classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        # Note: CAPTCHA-breaking targets the very old Keras 0.x API, where
        # Convolution2D takes (nb_filter, stack_size, nb_row, nb_col) and
        # Dense takes (input_dim, output_dim).
        model.add(Convolution2D(32, 3, 32, 32))
        model.add(Activation('sigmoid'))
        model.add(Flatten())
        model.add(Dense(32, y_test.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #5
Source File: example.py    From residual_block_keras with GNU General Public License v3.0
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist: # size to be changed to 32,32
        model.add(ZeroPadding2D((2,2), input_shape=(img_channels, img_rows, img_cols))) # resize (28,28)-->(32,32)
        # the first conv 
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))

    model.add(Activation('relu'))
    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))  # nb_classes is defined at module level in example.py
    model.add(Activation('softmax'))
    # [END]
    return model 
Example #6
Source File: cnn_mnist.py    From deep_learning_ex with MIT License
def init_model():
    """
    """
    start_time = time.time()
    print 'Compiling model...'
    model = Sequential()

    model.add(Convolution2D(64, 3, 3, border_mode='valid', input_shape=INPUT_SHAPE))  # INPUT_SHAPE is a module-level constant in cnn_mnist.py
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(.25))

    model.add(Flatten())

    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))

    model.summary()
    return model 
Example #7
Source File: cnn.py    From DeepFashion with Apache License 2.0
def model_create(input_shape, num_classes):
        logging.debug('input_shape {}'.format(input_shape))

        model = Sequential()

        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(128))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))

        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

        # Use binary_crossentropy instead when there are only two classes (yes/no)
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

        return model 
Example #8
Source File: Build_Model.py    From DOVE with GNU General Public License v3.0
def makecnn(learningrate, regular, decay, channel_number):
    # Model structure. All unlisted arguments use their Keras defaults.
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number), padding='valid',
                     data_format='channels_last', use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     padding='valid', data_format='channels_last', use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='valid',
                           data_format='channels_last'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     padding='valid', data_format='channels_last', use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='valid',
                           data_format='channels_last'))
    model.add(Flatten())
    model.add(Dropout(0.3))
    # Keras ignores input_shape on any layer after the first, so it is
    # omitted on the Dense layers below.
    model.add(Dense(1000, use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))

    model.add(Dense(100, use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))

    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    activity_regularizer=l2(regular)))
    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                  schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
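A hedged usage sketch for makecnn() (all hyperparameter values are illustrative assumptions; f1score, precision, and recall must already be defined as custom metrics in Build_Model.py for compile() to succeed):

model = makecnn(learningrate=1e-4, regular=1e-3, decay=0.004, channel_number=8)
model.summary()  # expects (20, 20, 20, 8) voxel grids, outputs one sigmoid probability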
Example #9
Source File: liver_model.py    From MCF-3D-CNN with MIT License
def cnn_3D(self, input_shape, modual=''):
        # Build the model (original comment: "build a Sequential model")
        model_in = Input(input_shape)    
        model = Convolution3D(
                filters = 6,
                kernel_size = (3, 3, 3),
                input_shape = input_shape,
                activation='relu',
                kernel_initializer='he_normal',
                name = modual+'conv1'
            )(model_in)# now 30x30x3x6
        model = MaxPooling3D(pool_size=(2,2,1))(model)# now 15x15x3x6
        model = Convolution3D(
                filters = 8,
                kernel_size = (4, 4, 3),
                activation='relu',
                kernel_initializer='he_normal',
                name = modual+'conv2'
            )(model)# now 12x12x1x8
        model = MaxPooling3D(pool_size=(2,2,1))(model)# now 6x6x1x8
        model = Flatten()(model)
        model = Dropout(0.5)(model)
        model_out = Dense(100, activation='relu', name = modual+'fc1')(model)
      
        return model_in, model_out 
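cnn_3D() returns an input tensor and an output tensor rather than a compiled model, so the caller assembles them with the functional API (typically to fuse several branches). A hedged sketch; the input shape (32, 32, 5, 1) is inferred from the shape comments above, and liver_model stands for an instance of the defining class:

from keras.models import Model

model_in, model_out = liver_model.cnn_3D(input_shape=(32, 32, 5, 1), modual='pv_')
branch = Model(inputs=model_in, outputs=model_out)  # a 100-d feature extractor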
Example #10
Source File: __init__.py    From deep_complex_networks with MIT License
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))

    conv = ComplexConv1D(
        32, 512, strides=16,
        activation='relu')(inputs)
    pool = AveragePooling1D(pool_size=4, strides=2)(conv)

    pool = Permute([2, 1])(pool)
    flattened = Flatten()(pool)

    dense = ComplexDense(2048, activation='relu')(flattened)
    predictions = ComplexDense(
        output_size, 
        activation='sigmoid',
        bias_initializer=Constant(value=-5))(dense)
    predictions = GetReal(predictions)
    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Example #11
Source File: lenet.py    From aiexamples with Apache License 2.0
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))
        # Softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model 
Example #12
Source File: models.py    From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def CapsuleNet_v2(n_capsule = 10, n_routings = 5, capsule_dim = 16,
     n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300,  trainable=True)(inputs)        
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name = 'concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #13
Source File: models.py    From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def CapsuleNet(n_capsule = 10, n_routings = 5, capsule_dim = 16,
     n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300,  trainable=True)(inputs)        
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name = 'concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #14
Source File: networks.py    From VizDoom-Keras-RL with MIT License
def a2c_lstm(input_shape, action_size, value_size, learning_rate):
        """Actor and Critic Network share convolution layers with LSTM
        """

        state_input = Input(shape=input_shape)  # 4x64x64x3
        x = TimeDistributed(Convolution2D(32, 8, 8, subsample=(4,4), activation='relu'))(state_input)
        x = TimeDistributed(Convolution2D(64, 4, 4, subsample=(2,2), activation='relu'))(x)
        x = TimeDistributed(Convolution2D(64, 3, 3, activation='relu'))(x)
        x = TimeDistributed(Flatten())(x)

        x = LSTM(512, activation='tanh')(x)

        # Actor Stream
        actor = Dense(action_size, activation='softmax')(x)

        # Critic Stream
        critic = Dense(value_size, activation='linear')(x)

        model = Model(input=state_input, output=[actor, critic])

        adam = Adam(lr=learning_rate, clipnorm=1.0)
        model.compile(loss=['categorical_crossentropy', 'mse'], optimizer=adam, loss_weights=[1., 1.])

        return model 
Example #15
Source File: networks.py    From VizDoom-Keras-RL with MIT License
def drqn(input_shape, action_size, learning_rate):

        model = Sequential()
        model.add(TimeDistributed(Convolution2D(32, 8, 8, subsample=(4,4), activation='relu'), input_shape=input_shape))
        model.add(TimeDistributed(Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')))
        model.add(TimeDistributed(Convolution2D(64, 3, 3, activation='relu')))
        model.add(TimeDistributed(Flatten()))

        # Use all traces for training
        #model.add(LSTM(512, return_sequences=True,  activation='tanh'))
        #model.add(TimeDistributed(Dense(output_dim=action_size, activation='linear')))

        # Use last trace for training
        model.add(LSTM(512,  activation='tanh'))
        model.add(Dense(output_dim=action_size, activation='linear'))

        adam = Adam(lr=learning_rate)
        model.compile(loss='mse',optimizer=adam)

        return model 
Example #16
Source File: networks.py    From VizDoom-Keras-RL with MIT License
def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
        """Model Value Distribution

        With States as inputs and output Probability Distributions for all Actions
        """

        state_input = Input(shape=input_shape)
        cnn_feature = Convolution2D(32, 8, 8, subsample=(4,4), activation='relu')(state_input)
        cnn_feature = Convolution2D(64, 4, 4, subsample=(2,2), activation='relu')(cnn_feature)
        cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
        cnn_feature = Flatten()(cnn_feature)
        cnn_feature = Dense(512, activation='relu')(cnn_feature)

        distribution_list = []
        for i in range(action_size):
            distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

        model = Model(input=state_input, output=distribution_list)

        adam = Adam(lr=learning_rate)
        model.compile(loss='categorical_crossentropy',optimizer=adam)

        return model 
Example #17
Source File: example_support.py    From PyGame-Learning-Environment with MIT License
def build_model(self):

        model = Sequential()
        model.add(Convolution2D(
            16, 8, 8, input_shape=(self.num_frames,) + self.frame_dim,
            subsample=(4, 4), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            16, 4, 4, subsample=(2, 2), activation="relu", init="he_uniform"
        ))
        model.add(Convolution2D(
            32, 3, 3, subsample=(1, 1), activation="relu", init="he_uniform"
        ))
        model.add(Flatten())
        model.add(Dense(
            512, activation="relu", init="he_uniform"
        ))
        model.add(Dense(
            self.num_actions, activation="linear", init="he_uniform"
        ))

        model.compile(loss=self.q_loss, optimizer=self.optimizer)

        self.model = model 
Example #18
Source File: captcha_gan.py    From Intelligent-Projects-Using-Python with MIT License
def discriminator(img_dim,alpha=0.2):
    model = Sequential()
    model.add(
            Conv2D(64, kernel_size=5,strides=2,
            padding='same',
            input_shape=img_dim)
            )
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(128,kernel_size=5,strides=2,padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(256,kernel_size=5,strides=2,padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

# Define a combination of Generator and Discriminator 
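The trailing comment introduces the generator/discriminator stack, which is not part of this excerpt. A hedged sketch of the usual DCGAN-style combination (the generator g is assumed to be built elsewhere in captcha_gan.py):

from keras.models import Sequential

def generator_discriminator(g, d):
    # Freeze the discriminator inside the stacked model so that training
    # the combined model only updates the generator.
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model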
Example #19
Source File: keras_model.py    From alphazero with Apache License 2.0
def build_model(args):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    in_x = x = Input(args['input_dim'])

    # (batch, channels, height, width)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
                data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)

    for _ in range(args['res_layer_num']):
        x = _build_residual_block(args, x)

    res_out = x
    
    # for policy output
    x = Conv2D(filters=2, kernel_size=1, data_format="channels_first", kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    policy_out = Dense(args['policy_dim'], kernel_regularizer=l2(l2_reg), activation="softmax", name="policy")(x)
    
    # for value output
    x = Conv2D(filters=1, kernel_size=1, data_format="channels_first", kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    x = Dense(256, kernel_regularizer=l2(l2_reg), activation="relu")(x)
    value_out = Dense(1, kernel_regularizer=l2(l2_reg), activation="tanh", name="value")(x)
    
    return Model(in_x, [policy_out, value_out], name="model") 
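_build_residual_block() is referenced above but not included in this excerpt. A hedged reconstruction consistent with the calling convention (channels-first Conv-BN-ReLU with an identity skip); the block in the actual alphazero repo may differ:

from keras.layers import Conv2D, BatchNormalization, Activation, Add
from keras.regularizers import l2

def _build_residual_block(args, x):
    # Hypothetical sketch: two Conv-BN stages plus a skip connection,
    # reusing the filter count and L2 penalty from build_model().
    in_x = x
    x = Conv2D(filters=args['cnn_filter_num'], kernel_size=args['cnn_filter_size'],
               padding="same", data_format="channels_first",
               kernel_regularizer=l2(args['l2_reg']))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=args['cnn_filter_num'], kernel_size=args['cnn_filter_size'],
               padding="same", data_format="channels_first",
               kernel_regularizer=l2(args['l2_reg']))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x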
Example #20
Source File: networks.py    From VizDoom-Keras-RL with MIT License
def dqn(input_shape, action_size, learning_rate):

        model = Sequential()
        model.add(Convolution2D(32, 8, 8, subsample=(4,4), activation='relu', input_shape=input_shape))
        model.add(Convolution2D(64, 4, 4, subsample=(2,2), activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(Flatten())
        model.add(Dense(output_dim=512, activation='relu'))
        model.add(Dense(output_dim=action_size, activation='linear'))

        adam = Adam(lr=learning_rate)
        model.compile(loss='mse',optimizer=adam)

        return model 
Example #21
Source File: CNN_LSTM.py    From DeepLearning-OCR with Apache License 2.0
def build_CNN_LSTM(channels, width, height, lstm_output_size, nb_classes):
	model = Sequential()
	# 1 conv
	model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', 
		input_shape=(channels, height, width)))
	model.add(BatchNormalization(mode=0, axis=1))
	# 2 conv
	model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
	model.add(BatchNormalization(mode=0, axis=1))
	model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
	# 3 conv
	model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
	model.add(BatchNormalization(mode=0, axis=1))
	# 4 conv
	model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
	model.add(BatchNormalization(mode=0, axis=1))
	model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
	# flatten
	model.add(Flatten())
	# 1 dense
	model.add(Dense(512, activation='relu'))
	model.add(BatchNormalization())
	model.add(Dropout(0.5))
	# 2 dense
	model.add(Dense(512, activation='relu'))
	model.add(BatchNormalization())
	model.add(Dropout(0.5))
	# lstm
	model.add(RepeatVector(lstm_output_size))
	model.add(LSTM(512, return_sequences=True))
	model.add(TimeDistributed(Dropout(0.5)))
	model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
	model.summary()
	model.compile(loss='categorical_crossentropy',
				  optimizer='adam',
				  metrics=[categorical_accuracy_per_sequence],
				  sample_weight_mode='temporal'
				  )

	return model 
Example #22
Source File: lenet.py    From DL4CVStarterBundle with GNU General Public License v3.0
def build(width, height, depth, classes):
        # Initialize the model
        model = Sequential()
        input_shape = (height, width, depth)

        # If we are using 'channels-first', update the input shape
        if K.image_data_format() == 'channels_first':
            input_shape = (depth, height, width)

        # First set of CONV => RELU => POOL layers
        model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # Second set of CONV => RELU => POOL layers
        model.add(Conv2D(50, (5, 5), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # First (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation('relu'))

        # Softmax classifier
        model.add(Dense(classes))
        model.add(Activation('softmax'))

        # return the constructed network architecture
        return model
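A hedged end-to-end sketch training the LeNet build() above on MNIST (the class name LeNet, the channels-last data format, and the training hyperparameters are assumptions):

from keras.datasets import mnist
from keras.utils import to_categorical

(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x = train_x.reshape(-1, 28, 28, 1).astype('float32') / 255.0
test_x = test_x.reshape(-1, 28, 28, 1).astype('float32') / 255.0
train_y, test_y = to_categorical(train_y, 10), to_categorical(test_y, 10)

model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_x, train_y, validation_data=(test_x, test_y), batch_size=128, epochs=5)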