Python keras.layers.MaxPooling2D() Examples

The following are code examples showing how to use keras.layers.MaxPooling2D(). They are drawn from open source Python projects.
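
As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below). With strides left at its default of pool_size, MaxPooling2D((2, 2)) halves each spatial dimension of its input:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D

# Toy model: one convolution followed by 2x2 max pooling.
model = Sequential()
model.add(Conv2D(8, (3, 3), padding='same', activation='relu',
                 input_shape=(32, 32, 3)))   # feature map: (32, 32, 8)
model.add(MaxPooling2D(pool_size=(2, 2)))    # feature map: (16, 16, 8)

print(model.output_shape)  # (None, 16, 16, 8)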

Example 1
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def cnn_2d(input_shape):

    model = Sequential()
    model.add(Conv2D(100, (3, 3), padding='valid', activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Conv2D(200, (3, 3), padding='valid', activation='relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Flatten())
    model.add(Dense(200, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model 
Example 2
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gap_keras.py    MIT License
def GAP():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    # GAP
    x = Conv2D(num_classes, (1, 1), padding='same', activation=None, name='out')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 3
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: zfnet_keras.py    MIT License
def ZFNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 4
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: lenet_keras.py    MIT License
def LeNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(6, (5, 5), padding='valid', activation=None, name='conv1')(inputs)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Activation('sigmoid')(x)
    x = Conv2D(16, (5, 5), padding='valid', activation=None, name='conv2')(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Activation('sigmoid')(x)
    
    x = Flatten()(x)
    x = Dense(120, name='dense1', activation=None)(x)
    x = Dense(64, name='dense2', activation=None)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 5
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: alexnet_keras.py    MIT License
def AlexNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (11, 11), padding='valid', strides=4, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 6
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nin_keras.py    MIT License
def NIN():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv1')(inputs)
    x = Conv2D(160, (1, 1), padding='same', strides=1, activation='relu', name='cccp1')(x)
    x = Conv2D(96, (1, 1), padding='same', strides=1, activation='relu', name='cccp2')(x)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv2')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp4')(x)
    x = AveragePooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (3, 3), padding='same', strides=1, activation='relu', name='conv3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp5')(x)
    x = Conv2D(num_classes, (1, 1), padding='same', strides=1, activation='relu', name='cccp6')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 7
Project: derplearning   Author: notkarol   File: line_train.py    MIT License
def create_model(input_shape, n_output, n_blocks=2):
    model = Sequential()
    model.add(Conv2D(96, (5, 5), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('elu'))
    model.add(MaxPooling2D(pool_size=2))

    for i in range(n_blocks):
        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('elu'))
        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('elu'))
        model.add(MaxPooling2D(pool_size=2))

    model.add(Flatten())
    model.add(Dense(100))
    model.add(Activation('elu'))
    model.add(Dense(n_output))
    
    return model 
Example 8
Project: ocsvm-anomaly-detection   Author: hiram64   File: model.py    MIT License
def build_cae_model(height=32, width=32, channel=3):
    """
    build convolutional autoencoder model
    """
    input_img = Input(shape=(height, width, channel))

    # encoder
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling2D((2, 2), padding='same', name='enc')(net)

    # decoder
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    decoded = Conv2D(channel, (3, 3), activation='sigmoid', padding='same')(net)

    return Model(input_img, decoded) 
Example 9
Project: ascii-net   Author: wahtak   File: model.py    MIT License
def __init__(self, shape_pixels, num_classes):
        self.input_shape = shape_pixels + (1, )

        self.model = Sequential()
        self.model.add(Convolution2D(nb_filters,
                                     kernel_size[0],
                                     kernel_size[1],
                                     border_mode='valid',
                                     input_shape=self.input_shape))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=pool_size))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(128))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(num_classes))
        self.model.add(Activation('softmax'))

        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adadelta') 
Example 10
Project: Sushi-dish-detection   Author: blackrubystudio   File: parallel_model.py    MIT License
def build_model(x_train, num_classes):
        # Reset default graph. Keras leaves old ops in the graph,
        # which are ignored for execution but clutter graph
        # visualization in TensorBoard.
        tf.reset_default_graph()

        inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
        x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
                      name="conv1")(inputs)
        x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
                      name="conv2")(x)
        x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
        x = KL.Flatten(name="flat1")(x)
        x = KL.Dense(128, activation='relu', name="dense1")(x)
        x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)

        return KM.Model(inputs, x, "digit_classifier_model")

    # Load MNIST Data 
Example 11
Project: blackbox-attacks   Author: sunblaze-ucb   File: mnist.py    MIT License
def modelF():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3,
                            border_mode='valid',
                            input_shape=(FLAGS.IMAGE_ROWS,
                                         FLAGS.IMAGE_COLS,
                                         FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))

    model.add(Dense(FLAGS.NUM_CLASSES))

    return model 
Example 12
Project: GANomaly   Author: Scott1123   File: mnist_ganomaly.py    MIT License
def make_encoder():
        modelE = Sequential()
        modelE.add(Conv2D(32, kernel_size=(3, 2), padding="same", input_shape=input_shape))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 2)))
        modelE.add(Conv2D(64, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 1)))
        modelE.add(Conv2D(128, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(Flatten())
        modelE.add(Dense(latent_dim))

        return modelE

    # Encoder 1 
Example 13
Project: GANomaly   Author: Scott1123   File: ganomaly.py    MIT License
def basic_encoder(self):
        modelE = Sequential()
        modelE.add(Conv2D(32, kernel_size=(3, 2), padding="same", input_shape=self.input_shape))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 2)))
        modelE.add(Conv2D(64, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 1)))
        modelE.add(Conv2D(128, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(Flatten())
        modelE.add(Dense(self.latent_dim))

        return modelE

    # Encoder 1 
Example 14
Project: GANomaly   Author: Scott1123   File: gnomaly.py    MIT License
def basic_encoder(self):
        modelE = Sequential()
        modelE.add(Conv2D(32, kernel_size=(3, 2), padding="same", input_shape=self.input_shape))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 2)))
        modelE.add(Conv2D(64, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(MaxPooling2D(pool_size=(2, 1)))
        modelE.add(Conv2D(128, kernel_size=(3, 2), padding="same"))
        modelE.add(BatchNormalization(momentum=0.8))
        modelE.add(Activation("relu"))
        modelE.add(Flatten())
        modelE.add(Dense(self.latent_dim))

        return modelE

    # Encoder 1 
Example 15
Project: MnistOnKeras   Author: jefferyUstc   File: model.py    GNU General Public License v3.0
def keras_model(input_shape=(28, 28, 1), num_classes=10):
    """
    build a Keras-based Sequential CNN model
    """
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model 
Example 16
Project: musical_genres_classification   Author: shaoeric   File: pre_process_mfcc.py    MIT License
def get_model():
    input_layer = Input(shape=(1, 1290, 40), name='mfcc_input')
    x = GaussianNoise(0.01)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(input=input_layer, output=x)
    print(model.summary())
    return model 
Example 17
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License
def get_mfcc_model():
    input_layer = Input(shape=(1, 1290, 40), name='mfcc_input')
    x = GaussianNoise(noise)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten(name='mfcc_flatten')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(inputs=input_layer, outputs=x)

    return model 
Example 18
Project: mtrl-auto-uav   Author: brunapearson   File: mtrl_network.py    MIT License
def create_model():
    #Create the convolutional stacks
    input_img = Input(shape=(224,224,3))

    x = Conv2D(16, kernel_size=3, activation='relu')(input_img)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Conv2D(32, kernel_size=3, activation='relu')(x)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Conv2D(64, kernel_size=3, activation='relu')(x)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Flatten()(x)
    x = Dense(500, activation='relu')(x)
    x = Dropout(0.20)(x)
    x = Dense(100, activation='relu')(x)
    x = Dense(20, activation='relu')(x)

    n = Conv2D(16, kernel_size=3, activation='relu')(input_img)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Conv2D(32, kernel_size=3, activation='relu')(n)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Conv2D(64, kernel_size=3, activation='relu')(n)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Flatten()(n)
    n = Dense(500, activation='relu')(n)
    #n = Dropout(0.50)(n)
    n = Dense(100, activation='relu')(n)
    n = Dense(20, activation='relu')(n)

    #output
    output_x = Dense(1, activation='linear', name='input_x')(n)
    output_y = Dense(1, activation='linear', name='input_y')(n)
    output_z = Dense(1, activation='linear', name='input_z')(n)

    output_qw = Dense(1, activation='linear', name='input_qw')(x)
    output_qx = Dense(1, activation='linear', name='input_qx')(x)
    output_qy = Dense(1, activation='linear', name='input_qy')(x)
    output_qz = Dense(1, activation='linear', name='input_qz')(x)


    model = Model(inputs=input_img, outputs=[output_x,output_y,output_z,output_qw,output_qx,output_qy,output_qz])
    return model 
Example 19
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        # Global Pooling 1D
        model = Sequential()
        model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Global Pooling 2D
        model = Sequential()
        model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 1D
        model = Sequential()
        model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Pooling 2D
        model = Sequential()
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 3D
        model = Sequential()
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                               input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 11)


# ********** Locally-connected Layers ********** 
Example 20
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Pooling']}
        # Pool 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [12, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
        # Pool 2D
        net['l0']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
        # Pool 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')


# ********** Locally-connected Layers ********** 
Example 21
Project: MODS_ConvNet   Author: santiagolopezg   File: hipster_net.py    MIT License
def cifar():

    # Determine proper input shape
    K.set_image_dim_ordering('th')
    input_shape = (1, 256, 192)
    img_input = Input(shape=input_shape)

    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_1')(img_input)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_2')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)

    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_1')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_2')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)

    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_2')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(1000, activation='relu', name='fc1')(x)
    x = Dense(1000, activation='relu', name='fc2')(x)
    x = Dense(2, activation='softmax', name='pred')(x)

    # Create model.
    model = Model(img_input, x)

    #weights='MODS_keras_weights_3_he_normal_0.5_rmsprop_24.h5'
    #model.load_weights(weights)

    return model 
Example 22
Project: Handwritten-Digit-Recognition-using-Deep-Learning   Author: anujdutt9   File: neural_network.py    MIT License
def build(width, height, depth, total_classes, Saved_Weights_Path=None):
        # Initialize the Model
        model = Sequential()

        # First CONV => RELU => POOL Layer
        model.add(Conv2D(20, 5, 5, border_mode="same", input_shape=(depth, height, width)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

        # Second CONV => RELU => POOL Layer
        model.add(Conv2D(50, 5, 5, border_mode="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

        # Third CONV => RELU => POOL Layer
        # Convolution -> ReLU Activation Function -> Pooling Layer
        model.add(Conv2D(100, 5, 5, border_mode="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

        # FC => RELU layers
        #  Fully Connected Layer -> ReLU Activation Function
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # Using Softmax Classifier for Linear Classification
        model.add(Dense(total_classes))
        model.add(Activation("softmax"))

        # If the saved_weights file is already present i.e model is pre-trained, load that weights
        if Saved_Weights_Path is not None:
            model.load_weights(Saved_Weights_Path)
        return model
# --------------------------------- EOC ------------------------------------ 
Example 23
Project: Jetson-RaceCar-AI   Author: ardamavi   File: get_model.py    Apache License 2.0
def get_model():
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    conv_1 = Conv2D(32, (4,4), strides=(2,2))(img_inputs)

    conv_2 = Conv2D(32, (4,4), strides=(2,2))(conv_1)

    conv_3 = Conv2D(32, (3,3), strides=(1,1))(conv_2)
    act_3 = Activation('relu')(conv_3)

    pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(act_3)

    flat_1 = Flatten()(pooling_1)

    fc = Dense(32)(flat_1)

    lidar_fc = Dense(32)(lidar_inputs)

    concatenate_layer = concatenate([fc, lidar_fc])

    fc = Dense(10)(concatenate_layer)
    fc = Activation('relu')(fc)
    fc = Dropout(0.5)(fc)

    outputs = Dense(2)(fc)

    outputs = Activation('sigmoid')(outputs)


    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])

    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model 
Example 24
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0
def createModel_schluter_valid(input, num_filter, height_filter, width_filter, filter_density, pool_n_row,
                pool_n_col, dropout):
    """
    original Schluter relu activation, no dropout
    :param input:
    :param num_filter:
    :param height_filter:
    :param width_filter:
    :param filter_density:
    :param pool_n_row:
    :param pool_n_col:
    :param dropout:
    :return:
    """

    x = ZeroPadding2D(padding=(0, int(width_filter/2)),  data_format="channels_first")(input)

    x = Conv2D(int(num_filter * filter_density), (height_filter, width_filter), padding="valid",
                       data_format="channels_first",
                       activation='relu')(x)

    output_shape = K.int_shape(x)

    if pool_n_row == 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], output_shape[3]), padding='same', data_format="channels_first")(x)
    elif pool_n_row == 'all' and pool_n_col != 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], pool_n_col), padding='same', data_format="channels_first")(x)
    elif pool_n_row != 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(pool_n_row, output_shape[3]), padding='same', data_format="channels_first")(x)
    else:
        x = MaxPooling2D(pool_size=(pool_n_row, pool_n_col), padding='same', data_format="channels_first")(x)
    x = Dropout(dropout)(x)
    x = Flatten()(x)

    return x 
Example 25
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0
def conv_module(conv, input_shape, input):
    if conv:
        x = Reshape((-1, input_shape[2]) + (1,))(input)
        # x = BatchNormalization()(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)

        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)
        shape = K.int_shape(x)
        x = Reshape((-1, shape[2] * shape[3]))(x)
    else:
        x = input
    return x 
Example 26
Project: SSD_keras_restnet   Author: hzm8341   File: SSD_resnet.py    MIT License
def resnet_34(width,height,channel,classes):
    inpt = Input(shape=(width, height, channel))
    x = ZeroPadding2D((3, 3))(inpt)

    #conv1
    x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    #conv2_x
    x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))

    #conv3_x
    x = identity_Block(x, nb_filter=128, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))

    #conv4_x
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))

    #conv5_x
    x = identity_Block(x, nb_filter=512, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = identity_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    x = Dense(classes, activation='softmax')(x)

    model = Model(inputs=inpt, outputs=x)
    return model 
Example 27
Project: SSD_keras_restnet   Author: hzm8341   File: SSD_resnet.py    MIT License
def resnet_50(input_shape, num_classes=21):

    input_tensor = Input(shape=input_shape)
    
    x = ZeroPadding2D((3, 3))(input_tensor)
    x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    #conv2_x
    x = bottleneck_Block(x, nb_filters=[64,64,256],strides=(1,1),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[64,64,256])
    x = bottleneck_Block(x, nb_filters=[64,64,256])

    #conv3_x
    x = bottleneck_Block(x, nb_filters=[128, 128, 512],strides=(2,2),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])

    #conv4_x
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024],strides=(2,2),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])

    #conv5_x
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048], strides=(2, 2), with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048])
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048])

    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=input_tensor, outputs=x)
    return model 
Example 28
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def fcn_2d(input_shape):
    inputs = Input(input_shape)

    conv1 = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(inputs)
#    bn1 = BatchNormalization()(conv1)
    act1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act1)

    conv2 = Conv2D(32, kernel_size=(6, 6), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool1)
#    bn2 = BatchNormalization()(conv2)
    act2 = Activation('relu')(conv2)
    pool2 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act2)

    conv3 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool2)
#    bn3 = BatchNormalization()(conv3)
    act3 = Activation('relu')(conv3)
    pool3 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act3)

    up1 = UpSampling2D(size=(3, 3))(pool3)
    deconv1 = Conv2DTranspose(32, 3)(up1)
    act4 = Activation('relu')(deconv1)

    up2 = UpSampling2D(size=(3, 3))(act4)
    deconv2 = Conv2DTranspose(16, 6)(up2)
    act5 = Activation('relu')(deconv2)

    up3 = UpSampling2D(size=(3, 3))(act5)
    deconv3 = Conv2DTranspose(nb_classes, 3)(up3)
    act6 = Activation('relu')(deconv3)
    deconv4 = Conv2DTranspose(nb_classes, 3)(act6)

    model = Model(inputs=inputs, outputs=deconv4)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss=softmax_sparse_crossentropy_ignoring_first_label,
                  optimizer=adam,
                  metrics=[sparse_accuracy])
    return model


# U-net model 
Example 29
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def fcn_2d(input_shape):
    inputs = Input(input_shape)
    conv1 = Conv2D(16, 3, activation='relu', padding='same')(inputs)
    conv1 = Conv2D(16, 3, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(32, 3, activation='relu', padding='same')(pool1)
    conv2 = Conv2D(32, 3, activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(64, 3, activation='relu', padding='same')(pool2)
    conv3 = Conv2D(64, 3, activation='relu', padding='same')(conv3)

    deconv1 = Conv2DTranspose(32, 3, activation='relu', padding='valid')(conv3)
    deconv2 = Conv2DTranspose(16, 3, activation='relu', padding='valid')(deconv1)

    model = Model(input=inputs, output=deconv2)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


# U-net model 
Example 30
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def unet(input_shape):
    inputs = Input(input_shape)
    conv1 = Conv2D(32, 3, activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, 3, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, 3, activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, 3, activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, 3, activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, 3, activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, 3, activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, 3, activation='relu', padding='same')(conv4)

    up5 = merge([UpSampling2D(size=(2, 2))(conv4), conv3], mode='concat', concat_axis=1)
    conv5 = Conv2D(128, 3, activation='relu', padding='same')(up5)
    conv5 = Conv2D(128, 3, activation='relu', padding='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv2], mode='concat', concat_axis=1)
    conv6 = Conv2D(64, 3, activation='relu', padding='same')(up6)
    conv6 = Conv2D(64, 3, activation='relu', padding='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv1], mode='concat', concat_axis=1)
    conv7 = Conv2D(32, 3, activation='relu', padding='same')(up7)
    conv7 = Conv2D(32, 3, activation='relu', padding='same')(conv7)

    conv8 = Conv2D(nb_classes, 1, activation='softmax')(conv7)

    model = Model(input=inputs, output=conv8)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


# 3D-CNN model 
Example 31
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg16_keras.py    MIT License
def VGG16():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_1')(inputs)
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 32
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: easy_keras.py    MIT License
def Mynet():
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 33
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg19_keras.py    MIT License
def VGG19():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_1')(inputs)
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_3')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_4')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_3')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_4')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_3')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_4')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 34
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: main_keras.py    MIT License
def Mynet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_1')(inputs)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Flatten()(x)
    x = Dense(1024, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 35
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, channel), name='in')
    x = Conv2D(32, (3, 3), padding='same', strides=1, name='enc1')(inputs)
    x = MaxPooling2D((2,2), 2)(x)
    x = Conv2D(16, (3, 3), padding='same', strides=1, name='enc2')(x)
    x = MaxPooling2D((2,2), 2)(x)
    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same', name='dec2')(x)
    out = keras.layers.Conv2DTranspose(channel, (2,2), strides=2, padding='same', name='out')(x)
    
    model = Model(inputs=inputs, outputs=out, name='model')
    return model 
Example 36
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_cifar10_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, channel), name='in')
    x = Conv2D(32, (3, 3), padding='same', strides=1, name='enc1')(inputs)
    x = MaxPooling2D((2,2), 2)(x)
    x = Conv2D(16, (3, 3), padding='same', strides=1, name='enc2')(x)
    x = MaxPooling2D((2,2), 2)(x)
    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same', name='dec2')(x)
    out = keras.layers.Conv2DTranspose(channel, (2,2), strides=2, padding='same', name='out')(x)
    
    model = Model(inputs=inputs, outputs=out, name='model')
    return model 
Example 37
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: concat_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    enc1 = x

    x = MaxPooling2D((2,2), 2)(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)

    x = keras.layers.concatenate([x, enc1])
    x = Conv2D(32, (1, 1), padding='same', strides=1, name='concat_conv')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 38
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: transposeconv_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = MaxPooling2D((2,2), 2)(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 39
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0
def build_model(self):
    
        initializer = initializers.random_normal(stddev=0.02)
    
        input_img = Input(shape=(self.layers, 22, 80))
        input_2 = Lambda(lambda x: x[:, 1:, :, :], output_shape=lambda x: (None, self.layers - 1, 22, 80))(input_img) # no map channel
    
        # whole map
        tower_1 = Conv2D(64, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_img)
        tower_1 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(tower_1)
        tower_1 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_1)
    
    
        #tower2
        tower_2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(input_2)
        for _ in range(self.depth):
            tower_2 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same", activation='relu')(tower_2)
        tower_2 = MaxPooling2D(pool_size=(11, 40), data_format="channels_first")(tower_2)
    
        #tower3
        tower_3 = MaxPooling2D(pool_size=(3, 6), data_format="channels_first", padding='same')(input_2)
        for _ in range(self.depth):
            tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same", activation='relu')(tower_3)
        tower_3 = MaxPooling2D(pool_size=(8, 14), data_format="channels_first", padding='same')(tower_3)
    
        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    
        flat_layer = Flatten()(merged_layers)
        
        predictions = Dense(5, kernel_initializer=initializer)(flat_layer)
        model = Model(inputs=input_img, outputs=predictions)
        
        rmsprop = RMSprop(lr=0.00025)
        model.compile(loss='mse', optimizer=rmsprop)
        return model 
Example 40
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0
def build_model(self):
    
        initializer = initializers.random_normal(stddev=0.02)
    
        input_img = Input(shape=(self.layers, 22, 80))
        input_2 = Lambda(lambda x: x[:, :2, :, :], output_shape=lambda x: (None, 2, 22, 80))(input_img) # no map channel
    
        # whole map 10x1
        tower_1 = ZeroPadding2D(padding=(1, 0), data_format="channels_first")(input_2)
        tower_1 = Conv2D(32, (10, 1), data_format="channels_first", strides=(7, 1), kernel_initializer=initializer, padding="valid")(tower_1)
        tower_1 = Flatten()(tower_1)
    
        # whole map 1x10
        tower_2 = Conv2D(32, (1, 10), data_format="channels_first", strides=(1, 7), kernel_initializer=initializer, padding="valid")(input_2)
        tower_2 = Flatten()(tower_2)
    
        # whole map 3x3 then maxpool 22x80
        tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_2)
        tower_3 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_3)
        tower_3 = Flatten()(tower_3)
    
        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    
        predictions = Dense(4, kernel_initializer=initializer)(merged_layers)
        model = Model(inputs=input_img, outputs=predictions)
        
        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)
        return model 
Example 41
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: load_model.py    Apache License 2.0
def load_model():

    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
    
    tensor_in = Input((60, 200, 3))
    out = tensor_in
    out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Flatten()(out)
    out = Dropout(0.5)(out)
    out = [Dense(37, name='digit1', activation='softmax')(out),\
        Dense(37, name='digit2', activation='softmax')(out),\
        Dense(37, name='digit3', activation='softmax')(out),\
        Dense(37, name='digit4', activation='softmax')(out),\
        Dense(37, name='digit5', activation='softmax')(out),\
        Dense(37, name='digit6', activation='softmax')(out)]
    
    model = Model(inputs=tensor_in, outputs=out)
    
    # Define the optimizer
    model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=['accuracy'])
    if 'Windows' in platform.platform():
        model.load_weights('{}\\cnn_weight\\verificatioin_code.h5'.format(PATH)) 
    else:
        model.load_weights('{}/cnn_weight/verificatioin_code.h5'.format(PATH)) 
    
    return model 
Example 42
Project: EUSIPCO2017   Author: Veleslavia   File: singlelayer.py    GNU Affero General Public License v3.0
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]
    maxpool_const = 4

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Example 43
Project: ocr_svc   Author: daveshap   File: keras_alphanumeric_model.py    MIT License 5 votes vote down vote up
def instantiate_model():
    print('COMPILING MODEL')
    model = Sequential()
    model.add(Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', input_shape=input_shape))
    model.add(Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model 
Example 44
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################ 
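
Side note (sketch, not project code): with padding="same" and strides=(2, 2), the Stage 1 max pool above halves the spatial size with ceiling rounding, independent of the 3x3 window:

from keras.layers import Input, MaxPooling2D
from keras.models import Model

feat = Input((57, 57, 64))
out = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(feat)
print(Model(feat, out).output_shape)   # (None, 29, 29, 64), i.e. ceil(57 / 2)
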
Example 45
Project: MesoNet   Author: DariusAf   File: classifiers.py    Apache License 2.0 5 votes vote down vote up
def init_model(self, dl_rate):
        x = Input(shape = (IMGWIDTH, IMGWIDTH, 3))
        
        x1 = Conv2D(16, (3, 3), dilation_rate = dl_rate, strides = 1, padding='same', activation = 'relu')(x)
        x1 = Conv2D(4, (1, 1), padding='same', activation = 'relu')(x1)
        x1 = BatchNormalization()(x1)
        x1 = MaxPooling2D(pool_size=(8, 8), padding='same')(x1)

        y = Flatten()(x1)
        y = Dropout(0.5)(y)
        y = Dense(1, activation = 'sigmoid')(y)
        return KerasModel(inputs = x, outputs = y) 
Example 46
Project: MesoNet   Author: DariusAf   File: classifiers.py    Apache License 2.0 5 votes vote down vote up
def init_model(self): 
        x = Input(shape = (IMGWIDTH, IMGWIDTH, 3))
        
        x1 = Conv2D(8, (3, 3), padding='same', activation = 'relu')(x)
        x1 = BatchNormalization()(x1)
        x1 = MaxPooling2D(pool_size=(2, 2), padding='same')(x1)
        
        x2 = Conv2D(8, (5, 5), padding='same', activation = 'relu')(x1)
        x2 = BatchNormalization()(x2)
        x2 = MaxPooling2D(pool_size=(2, 2), padding='same')(x2)
        
        x3 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x2)
        x3 = BatchNormalization()(x3)
        x3 = MaxPooling2D(pool_size=(2, 2), padding='same')(x3)
        
        x4 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x3)
        x4 = BatchNormalization()(x4)
        x4 = MaxPooling2D(pool_size=(4, 4), padding='same')(x4)
        
        y = Flatten()(x4)
        y = Dropout(0.5)(y)
        y = Dense(16)(y)
        y = LeakyReLU(alpha=0.1)(y)
        y = Dropout(0.5)(y)
        y = Dense(1, activation = 'sigmoid')(y)

        return KerasModel(inputs = x, outputs = y) 
Example 47
Project: MesoNet   Author: DariusAf   File: classifiers.py    Apache License 2.0 5 votes vote down vote up
def init_model(self):
        x = Input(shape = (IMGWIDTH, IMGWIDTH, 3))
        
        x1 = self.InceptionLayer(1, 4, 4, 2)(x)
        x1 = BatchNormalization()(x1)
        x1 = MaxPooling2D(pool_size=(2, 2), padding='same')(x1)
        
        x2 = self.InceptionLayer(2, 4, 4, 2)(x1)
        x2 = BatchNormalization()(x2)
        x2 = MaxPooling2D(pool_size=(2, 2), padding='same')(x2)        
        
        x3 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x2)
        x3 = BatchNormalization()(x3)
        x3 = MaxPooling2D(pool_size=(2, 2), padding='same')(x3)
        
        x4 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x3)
        x4 = BatchNormalization()(x4)
        x4 = MaxPooling2D(pool_size=(4, 4), padding='same')(x4)
        
        y = Flatten()(x4)
        y = Dropout(0.5)(y)
        y = Dense(16)(y)
        y = LeakyReLU(alpha=0.1)(y)
        y = Dropout(0.5)(y)
        y = Dense(1, activation = 'sigmoid')(y)

        return KerasModel(inputs = x, outputs = y) 
Example 48
Project: musical_genres_classification   Author: shaoeric   File: pre_process_mfcc.py    MIT License 5 votes vote down vote up
def get_model():
    input_layer = Input(shape=(1, 130, 40), name='mfcc_input')
    x = GaussianNoise(0.08)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(input=input_layer, output=x)
    # model.compile(optimizer=optimizers.sgd(lr=1e-4, decay=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    return model 
Example 49
Project: musical_genres_classification   Author: shaoeric   File: attention_logbank.py    MIT License 5 votes vote down vote up
def get_model():
    regularize = 0.01
    input_layer = Input(shape=(1, 120, 120), name='attention_logbank_input')
    x = GaussianNoise(0.03)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu',kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize),data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(1, 3), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(32, 25))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(input=input_layer, output=x)
    print(model.summary())
    return model 
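
Side note (sketch): working through the pooling stages above, the channels_first feature map entering Reshape is (32, 5, 5), so target_shape=(32, 25) flattens each channel's 5x5 map into a 25-dim vector and the GRU sees a 32-step sequence. The shapes below are derived from the layer arguments, not taken from the repository:

import numpy as np

fmap = np.zeros((1, 32, 5, 5))        # (batch, channels, h, w) after the last 2x2 pool
seq = fmap.reshape(1, 32, 25)         # what Reshape(target_shape=(32, 25)) produces
print(seq.shape)                      # (1, 32, 25): 32 time steps of 25 features
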
Example 50
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_mfcc_model():
    input_layer = Input(shape=(1, 130, 40), name='mfcc_input')
    x = GaussianNoise(0.08)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(input=input_layer, output=x)
    # model.compile(optimizer=optimizers.sgd(lr=1e-4, decay=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])

    return model 
Example 51
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_logfbank_model():
    regularize = 0.01
    input_layer = Input(shape=(1, 120, 120), name='attention_logbank_input')
    x = GaussianNoise(0.03)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(1, 3), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(32, 25))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal',
              kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(input=input_layer, output=x)
    return model 
Example 52
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_logfbank_model():
    regularize = 0.01
    input_layer = Input(shape=(1, 1197, 120), name='attention_logbank_input')
    x = GaussianNoise(noise)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(17, 32 * 15))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal',
              kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(inputs=input_layer, outputs=x)
    return model 
Example 53
Project: labelImg   Author: keyuncheng   File: model.py    MIT License 5 votes vote down vote up
def resnet_graph(input_image, architecture, stage5=False):
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################ 
Example 54
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0 4 votes vote down vote up
def pooling(layer, layer_in, layerId, tensor=True):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = layer['params']['pad_h'], layer['params']['pad_w'],\
                layer['params']['pad_d']
            out[layerId + 'Pad'] = ZeroPadding3D(
                padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #    padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out


# ********** Locally-connected Layers ********** 
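
Side note (a simplified sketch with hypothetical parameter values, not the project's own test code): the dict-driven dispatch above effectively maps a ('2D', 'MAX') spec onto keras.layers.MaxPooling2D like this:

from keras.layers import Input, MaxPooling2D, AveragePooling2D
from keras.models import Model

pool_map = {('2D', 'MAX'): MaxPooling2D, ('2D', 'AVE'): AveragePooling2D}
params = {'layer_type': '2D', 'pool': 'MAX',
          'kernel_h': 2, 'kernel_w': 2, 'stride_h': 2, 'stride_w': 2}

layer_in = Input((28, 28, 8))
pool_cls = pool_map[(params['layer_type'], params['pool'])]
out = pool_cls(pool_size=(params['kernel_h'], params['kernel_w']),
               strides=(params['stride_h'], params['stride_w']),
               padding='valid')(layer_in)
print(Model(layer_in, out).output_shape)   # (None, 14, 14, 8)
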
Example 55
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo3.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 8, 8,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 6, 6,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 4, 4,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 2, 2,init=weight_init, name='conv1_4'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 208, out 104
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 8, 8,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 6, 6,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 4, 4,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 2, 2,init=weight_init, name='conv2_4'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 88, out is 44 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(220, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 56
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dropout(dropout))
	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 57
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 5, 5,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 116, out 58
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 52, out is 26 
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 20, out is 10 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(10, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 58
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_two.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(16, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(16, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dropout(dropout))
	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 59
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dropout(dropout))
	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 60
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo2.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 7, 7,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 5, 5,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 212, out 106
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 7, 7,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 5, 5,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 94, out is 47 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(220, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 61
Project: Keras-Unet   Author: MLearing   File: unet.py    GNU General Public License v2.0 4 votes vote down vote up
def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 1))

        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)  # pool1 = MaxPooling2D(...)(conv1): the tensor conv1 is the input; the later pooling calls follow the same pattern
        
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(drop5))
        merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)


        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        merge8 = merge([conv2, up8], mode='concat', concat_axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        merge9 = merge([conv1, up9], mode='concat', concat_axis=3)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        print('model compile')
        return model 
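
Side note (sketch): the merge(..., mode='concat', concat_axis=3) calls above are the Keras 1 API; on Keras 2 the same skip connections are written with the functional concatenate helper, e.g.:

from keras.layers import Input, concatenate

a = Input((64, 64, 512))
b = Input((64, 64, 512))
merged = concatenate([a, b], axis=3)   # replaces merge([a, b], mode='concat', concat_axis=3)
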
Example 62
Project: Scene-Understanding   Author: foamliu   File: vgg16.py    MIT License 4 votes vote down vote up
def vgg16_model(img_rows, img_cols, channel=3):
    model = Sequential()
    # Encoder
    model.add(ZeroPadding2D((1, 1), input_shape=(img_rows, img_cols, channel), name='input'))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add Fully Connected Layer
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='dense2'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax', name='softmax'))

    # Loads ImageNet pre-trained data
    weights_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    return model 
Example 63
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def jan_original(filter_density, dropout, input_shape, batchNorm=False, dense_activation='relu', channel=1):
    if channel == 1:
        reshape_dim = (1, input_shape[0], input_shape[1])
        channel_order = 'channels_first'
    else:
        reshape_dim = input_shape
        channel_order = 'channels_last'

    model_1 = Sequential()

    if batchNorm:
        model_1.add(BatchNormalization(axis=1, input_shape=reshape_dim))

    model_1.add(Conv2D(int(10 * filter_density), (3, 7), padding="valid",
                       input_shape=reshape_dim,
                       data_format=channel_order, activation='relu'))
    model_1.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    model_1.add(Conv2D(int(20 * filter_density), (3, 3), padding="valid",
                       data_format=channel_order, activation='relu'))
    model_1.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    if dropout:
        model_1.add(Dropout(dropout))  # test Schluter dataset, comment in jingju dataset

    model_1.add(Flatten())

    model_1.add(Dense(units=256, activation=dense_activation))
    # model_1.add(ELU())

    if dropout:
        model_1.add(Dropout(dropout))

    model_1.add(Dense(1, activation='sigmoid'))
    # model_1.add(Activation("softmax"))

    # optimizer = SGD(lr=0.05, momentum=0.45, decay=0.0, nesterov=False)
    optimizer = Adam()

    model_1.compile(loss='binary_crossentropy',
                    optimizer=optimizer,
                    metrics=['accuracy'])

    print(model_1.summary())

    return model_1 
Example 64
Project: ndparse   Author: neurodata   File: nddl.py    Apache License 2.0 4 votes vote down vote up
def ciresan_n3(n=65, nOutput=2):
    """An approximation of the N3 network from [1].
    Note that we also made a few small modifications along the way
    (from Theano to caffe and now to tensorflow/keras).

    As of this writing, no serious attempt has been made to optimize
    hyperparameters or structure of this network.

    Parameters:
       n : The tile size (diameter) to use in the sliding window.
           Tiles are assumed to be square, hence only one parameter.

    [1] Ciresan et al 'Deep neural networks segment neuronal membranes in
        electron microscopy images,' NIPS 2012.
    """

    from keras.optimizers import SGD
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization


    model = Sequential()

    # input: nxn images with 1 channel -> (1, n, n) tensors.
    # this applies 48 convolution filters of size 5x5 each.
    model.add(Convolution2D(48, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, n, n)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...

    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...
    #model.add(Dropout(0.25))

    model.add(Convolution2D(48, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(200))
    model.add(Activation('relu'))
    #model.add(Dropout(0.5))

    model.add(Dense(nOutput))  # use 2 for binary classification
    model.add(Activation('softmax'))

    return model


#-------------------------------------------------------------------------------
#  Code for training a deep learning network
#------------------------------------------------------------------------------- 
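
Usage sketch (assumed, not from the repository; it relies on the same Keras 1-era API the function above imports): build the network for 65x65 tiles and push a dummy channels-first batch through it:

import numpy as np

model = ciresan_n3(n=65, nOutput=2)
tiles = np.random.rand(4, 1, 65, 65).astype('float32')   # (batch, 1, n, n) tiles
probs = model.predict(tiles)                             # (4, 2) softmax scores
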
Example 65
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License 4 votes vote down vote up
def unet(input_shape):
    inputs = Input(input_shape)
    conv0 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(inputs)
    conv0 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv0)
    conv0 = Conv2D(32, kernel_size=(2, 2), strides=(1, 1), activation='relu')(conv0)

    conv1 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv0)
    conv1 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv5)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)
    conv6 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv6)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)
    conv7 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv7)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)
    conv8 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv8)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3)
    conv9 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv9)

#    conv10 = Conv2D(nb_classes, kernel_size=(1, 1), strides=(1, 1), padding='same')(conv9)

    deconv10 = Conv2DTranspose(nb_classes, kernel_size=(2, 2), strides=(1, 1), activation='relu', trainable=False)(conv9)
    conv10 = Conv2D(nb_classes, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(deconv10)
    conv10 = Conv2D(nb_classes, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv10)

    model = Model(inputs=inputs, outputs=conv10)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss=softmax_sparse_crossentropy_ignoring_first_label,
                  optimizer=adam,
                  metrics=[sparse_accuracy])
    return model


# Global settings 
Example 66
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res18_keras.py    MIT License 4 votes vote down vote up
def Res18():

    def ResBlock(x, in_f, out_f, stride=1, name="res"):
        res_x = Conv2D(out_f, [3, 3], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = ResBlock(x, 64, 64, name="res2_1")
    x = ResBlock(x, 64, 64, name="res2_2")

    x = ResBlock(x, 64, 128, stride=2, name="res3_1")
    x = ResBlock(x, 128, 128, name="res3_2")

    x = ResBlock(x, 128, 256, stride=2, name="res4_1")
    x = ResBlock(x, 256, 256, name="res4_2")

    x = ResBlock(x, 256, 512, stride=2, name="res5_1")
    x = ResBlock(x, 512, 512, name="res5_2")

    x = AveragePooling2D([7, 7], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
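
Side sketch (not project code): in ResBlock above, when stride == 2 the residual branch is downsampled by its strided 3x3 conv, so the shortcut is also passed through a 2x2 max pool (after the 1x1 channel-matching conv) so that Add() sees matching shapes:

from keras.layers import Input, Conv2D, MaxPooling2D, Add
from keras.models import Model

x = Input((56, 56, 64))
res = Conv2D(128, (3, 3), strides=2, padding='same')(x)      # (None, 28, 28, 128)
sc = Conv2D(128, (1, 1), strides=1, padding='same')(x)       # match channel count
sc = MaxPooling2D((2, 2), strides=2, padding='same')(sc)     # match spatial size
y = Add()([res, sc])
print(Model(x, y).output_shape)                              # (None, 28, 28, 128)
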
Example 67
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: resNeXt101_keras.py    MIT License 4 votes vote down vote up
def ResNeXt101():

    def Block(x, in_f, f_1, out_f, stride=1, cardinality=32):  # cardinality default assumed here (32 is the standard ResNeXt setting)
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None)(x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        multiplier = f_1 // cardinality
        res_x = SeparableConv2D(f_1, [3, 3], strides=1, padding='same', depth_multiplier=multiplier, activation=None)(res_x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None)(res_x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None)(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = Block(x, 64, 64, 256)
    x = Block(x, 256, 64, 256)
    x = Block(x, 256, 64, 256)

    x = Block(x, 256, 128, 512, stride=2)
    x = Block(x, 512, 128, 512)
    x = Block(x, 512, 128, 512)
    x = Block(x, 512, 128, 512)

    x = Block(x, 512, 256, 1024, stride=2)
    for i in range(22):
        x = Block(x, 1024, 256, 1024)

    x = Block(x, 1024, 512, 2048, stride=2)
    x = Block(x, 2048, 256, 2048)
    x = Block(x, 2048, 256, 2048)

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name='out')(x)

    model = Model(inputs=inputs, outputs=[x])

    return model 
Example 68
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res34_keras.py    MIT License 4 votes vote down vote up
def Res34():

    def ResBlock(x, in_f, out_f, stride=1, name="res"):
        res_x = Conv2D(out_f, [3, 3], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = ResBlock(x, 64, 64, name="res2_1")
    x = ResBlock(x, 64, 64, name="res2_2")
    x = ResBlock(x, 64, 64, name="res2_3")

    x = ResBlock(x, 64, 128, stride=2, name="res3_1")
    x = ResBlock(x, 128, 128, name="res3_2")
    x = ResBlock(x, 128, 128, name="res3_3")
    x = ResBlock(x, 128, 128, name="res3_4")

    x = ResBlock(x, 128, 256, stride=2, name="res4_1")
    x = ResBlock(x, 256, 256, name="res4_2")
    x = ResBlock(x, 256, 256, name="res4_3")
    x = ResBlock(x, 256, 256, name="res4_4")
    x = ResBlock(x, 256, 256, name="res4_5")
    x = ResBlock(x, 256, 256, name="res4_6")

    x = ResBlock(x, 256, 512, stride=2, name="res5_1")
    x = ResBlock(x, 512, 512, name="res5_2")
    x = ResBlock(x, 512, 512, name="res5_3")

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
Example 69
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bn_keras.py    MIT License 4 votes vote down vote up
def VGG16():
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 70
Project: chartAnalyst   Author: huima58   File: chart_analyst.py    Apache License 2.0 4 votes vote down vote up
def predict(train_imgs, train_labels, test_imgs, test_labels, x_pix_num=x_pix_num_default, y_pix_num=y_pix_num_default,
            use_saved_weights=False, weights_file_name=''):
    model = Sequential()
    # use partial VGG16 model
    model.add(ZeroPadding2D((1, 1), input_shape=(1, y_pix_num, x_pix_num)))
    
    base_filter_num = 64
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # added this layer to reduce the input size
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num * 2, 3, 3, activation='relu', name='conv2_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num * 2, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128, init='uniform', activation='tanh'))
    model.add(Dropout(0.25))
    model.add(Dense(64, init='uniform', activation='tanh'))
    model.add(Dense(3, init='uniform', activation='softmax'))
    
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if use_saved_weights:
        model.load_weights(weights_file_name)  #need to install h5py
    else:
        start_time = datetime.today()
        checkpointer = ModelCheckpoint(filepath=weights_file_name, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        earlyStopping = EarlyStopping(monitor='val_acc', patience=10, verbose=1, mode='max')
        model.fit(train_imgs, train_labels,
                        nb_epoch=30,
                        verbose=1,
                        batch_size=70,
                        validation_split=0.1,
                        callbacks=[checkpointer, earlyStopping])
        model.load_weights(weights_file_name)
        end_time = datetime.today()
        print "----trained time is from " + str(start_time) + " to " + str(end_time)
        
    predict_rst = model.predict_proba(test_imgs, verbose=0)
    return predict_rst 
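From the layers above, predict() expects channels-first grayscale charts shaped (n, 1, y_pix_num, x_pix_num), and because the loss is sparse_categorical_crossentropy feeding a 3-way softmax, the labels are plain integer class ids. A hypothetical shape sketch (all sizes here are assumptions, not project values):

import numpy as np

n_samples, y_pix, x_pix = 100, 48, 48
train_imgs = np.zeros((n_samples, 1, y_pix, x_pix), dtype='float32')   # channels-first, one channel
train_labels = np.random.randint(0, 3, size=(n_samples,))              # integer ids for the 3 classes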
Example 71
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: build_verification_code_cnn_model.py    Apache License 2.0 4 votes vote down vote up
def train_verification_model(self):

        def build_cnn_model():

            tensor_in = Input((60, 200, 3))
            tensor_out = tensor_in
            tensor_out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            
            Dense(1024, activation="relu")  # note: this layer is never connected to the graph, so it has no effect
            
            tensor_out = Flatten()(tensor_out)
            tensor_out = Dropout(0.5)(tensor_out)
            tensor_out = [Dense(37, name='digit1', activation='softmax')(tensor_out),\
                Dense(37, name='digit2', activation='softmax')(tensor_out),\
                Dense(37, name='digit3', activation='softmax')(tensor_out),\
                Dense(37, name='digit4', activation='softmax')(tensor_out),\
                Dense(37, name='digit5', activation='softmax')(tensor_out),\
                Dense(37, name='digit6', activation='softmax')(tensor_out)]
            model = Model(inputs=tensor_in, outputs=tensor_out)
            

            return model

        model = build_cnn_model()
        #===============================================================
        # define the optimizer (alternative: Adamax)
        optimizer = RMSprop(lr=1e-3, rho=0.8, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        # model.summary()

        history = model.fit(self.train_data,self.train_labels, 
                            batch_size = 512, epochs=20, verbose=1, 
                            validation_data=(self.test_data,self.test_labels) )
        
        self.model = model
        self.history = history
        ( self.train_correct3 , self.test_correct3, 
          self.train_final_score, self.test_final_score ) = self.compare_val_train_error()
#------------------------------------------------------------------- 
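Because the model above ends in six softmax heads (digit1 ... digit6) trained with categorical_crossentropy, self.train_labels has to be a list of six one-hot arrays, one per character position over the 37-symbol alphabet. A hypothetical label-layout sketch (the batch size and random ids are assumptions):

import numpy as np
from keras.utils import to_categorical

n_samples, n_positions, n_symbols = 8, 6, 37
raw_ids = np.random.randint(0, n_symbols, size=(n_samples, n_positions))
train_labels = [to_categorical(raw_ids[:, i], num_classes=n_symbols) for i in range(n_positions)]
# len(train_labels) == 6, and each element has shape (n_samples, 37)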
Example 72
Project: EUSIPCO2017   Author: Veleslavia   File: multilayer.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    maxpool_const = 4
    m_sizes = [5, 80]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS // maxpool_const, SEGMENT_DUR // maxpool_const),
                             name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(x)

    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
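Each of the six parallel branches above is pooled down to roughly maxpool_const x maxpool_const (4 x 4, provided N_MEL_BANDS and SEGMENT_DUR are divisible by 4), which is what lets feature maps built with different kernel shapes be concatenated along the channel axis. The merged tensor then carries 2 x (128 + 64 + 32) = 448 channels; a quick arithmetic check (illustrative only):

m_sizes = [5, 80]
n_filters = [128, 64, 32]
print(len(m_sizes) * sum(n_filters))   # -> 448 channels after the concat merge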
Example 73
Project: surface-crack-detection   Author: Khoronus   File: unet.py    MIT License 4 votes vote down vote up
def model(weights_input=None):

    inputs = Input(IMAGE_SIZE)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(inputs)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool1)
    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool2)
    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool3)
    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool4)
    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(drop5))
    merge6 = Concatenate(axis=3)([drop4,up6])
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge6)
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv6)

    up7 = Conv2D(256, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv6))
    merge7 = Concatenate(axis=3)([conv3,up7])
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge7)
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv7)

    up8 = Conv2D(128, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv7))
    merge8 = Concatenate(axis=3)([conv2,up8])
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge8)
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv8)

    up9 = Conv2D(64, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv8))
    merge9 = Concatenate(axis=3)([conv1,up9])
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge9)
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)
    conv9 = Conv2D(2, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)

    conv10 = Conv2D(1, 1, activation="sigmoid")(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss="binary_crossentropy", metrics=["accuracy"])

    if weights_input:
        model.load_weights(weights_input)

    return model 
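The encoder above applies four 2 x 2 max-poolings and the decoder mirrors them with four UpSampling2D steps, so the Concatenate calls only line up when both spatial sides of IMAGE_SIZE are divisible by 16. A small pre-flight check one might add (the IMAGE_SIZE value here is an assumption, not the project's constant):

IMAGE_SIZE = (256, 256, 1)   # assumed (height, width, channels)
assert IMAGE_SIZE[0] % 16 == 0 and IMAGE_SIZE[1] % 16 == 0, \
    "U-Net skip connections need height and width divisible by 2**4"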
Example 74
Project: keras-ctpn   Author: yizt   File: base_net.py    Apache License 2.0 4 votes vote down vote up
def resnet50(image_input):
    bn_axis = 3

    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(image_input)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    # block 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # determine which layers to fine-tune
    no_train_model = Model(inputs=image_input, outputs=x)
    for l in no_train_model.layers:
        if isinstance(l, layers.BatchNormalization):
            l.trainable = True
        else:
            l.trainable = False
    # block 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # block 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # block 5
    # x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    # x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    # x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # model = Model(input, x, name='resnet50')

    return x 
Example 75
Project: Intelligent_Arm_Project   Author: TeamLimb   File: tiny_yolo2.py    MIT License 4 votes vote down vote up
def tiny_yolo2():
    my_input = Input(shape=(416, 416, 3))
    output = Conv2D(16, (3, 3),
                    strides=(1, 1),
                    padding='same',
                    use_bias=False,
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(my_input)
    output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = MaxPooling2D(pool_size=(2, 2))(output)

    # Layer 2 - 5
    for i in range(0, 4):
        output = Conv2D(32 * (2 ** i), (3, 3),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='glorot_normal',
                        kernel_regularizer=regularizers.l2(0.01))(output)
        output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
        output = LeakyReLU(alpha=0.1)(output)
        output = MaxPooling2D(pool_size=(2, 2))(output)

    # Layer 6
    output = Conv2D(512, (3, 3),
                    strides=(1, 1),
                    padding='same',
                    use_bias=False,
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(output)
    output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(output)

    # Layer 7 - 8
    for _ in range(0, 2):
        output = Conv2D(1024, (3, 3),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='glorot_normal',
                        kernel_regularizer=regularizers.l2(0.01))(output)
        output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
        output = LeakyReLU(alpha=0.1)(output)

    # Layer 9
    output = Conv2D(5 * (4 + 1 + 80), (1, 1),
                    strides=(1, 1),
                    padding='same',
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(output)
    output = Activation('linear')(output)

    return Model(my_input, output) 
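The network above downsamples the 416 x 416 input through five stride-2 poolings (the sixth pooling keeps stride 1 with 'same' padding), and the final 1 x 1 convolution emits 5 * (4 + 1 + 80) channels, presumably 5 anchors times 4 box coordinates, 1 objectness score and 80 class scores. That gives the usual 13 x 13 x 425 YOLOv2-style output grid; a quick arithmetic check (illustrative only):

grid = 416 // 2 ** 5            # five stride-2 poolings
channels = 5 * (4 + 1 + 80)     # anchors * (box coords + objectness + classes)
print(grid, grid, channels)     # -> 13 13 425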
Example 76
Project: gumpy-deeplearning   Author: gumpy-bci   File: cnn_stft.py    MIT License 4 votes vote down vote up
def create_model(self, input_shape, dropout=0.5, print_summary=False):

        # basis of the CNN_STFT is a Sequential network
        model = Sequential()

        # spectrogram creation using STFT
        model.add(Spectrogram(n_dft = 128, n_hop = 16, input_shape = input_shape,
                  return_decibel_spectrogram = False, power_spectrogram = 2.0,
                  trainable_kernel = False, name = 'static_stft'))
        model.add(Normalization2D(str_axis = 'freq'))

        # Conv Block 1
        model.add(Conv2D(filters = 24, kernel_size = (12, 12),
                         strides = (1, 1), name = 'conv1',
                         border_mode = 'same'))
        model.add(BatchNormalization(axis = 1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2), padding = 'valid',
                               data_format = 'channels_last'))

        # Conv Block 2
        model.add(Conv2D(filters = 48, kernel_size = (8, 8),
                         name = 'conv2', border_mode = 'same'))
        model.add(BatchNormalization(axis = 1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid',
                               data_format = 'channels_last'))

        # Conv Block 3
        model.add(Conv2D(filters = 96, kernel_size = (4, 4),
                         name = 'conv3', border_mode = 'same'))
        model.add(BatchNormalization(axis = 1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2),
                               padding = 'valid',
                               data_format = 'channels_last'))
        model.add(Dropout(dropout))

        # classificator
        model.add(Flatten())
        model.add(Dense(2))  # two classes only
        model.add(Activation('softmax'))

        if print_summary:
            print(model.summary())

        # compile the model
        model.compile(loss = 'categorical_crossentropy',
                      optimizer = 'adam',
                      metrics = ['accuracy'])

        # assign model and return
        self.model = model
        return model 
Example 77
Project: DEXTR-KerasTensorflow   Author: scaelles   File: resnet.py    GNU General Public License v3.0 4 votes vote down vote up
def ResNet101(input_tensor=None):

    img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
    x = BN(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', strides=(2, 2))
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='g', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='h', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='i', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='j', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='k', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='l', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='m', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='n', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='o', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='p', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='q', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='r', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='s', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='t', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='u', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='v', dilation=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='w', dilation=2)

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', dilation=4)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', dilation=4)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', dilation=4)

    return x 
Example 78
Project: 360_aware_saliency   Author: MikhailStartsev   File: dcn_resnet.py    GNU General Public License v3.0 4 votes vote down vote up
def dcn_resnet(input_tensor=None):
    input_shape = (3, None, None)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor)
        else:
            img_input = input_tensor

    bn_axis = 1

    # conv_1
    x = ZeroPadding2D((3, 3))(img_input)
    x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same')(x)

    # conv_2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # conv_3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', strides=(2, 2))
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # conv_4
    x = conv_block_atrous(x, 3, [256, 256, 1024], stage=4, block='a', atrous_rate=(2, 2))
    x = identity_block_atrous(x, 3, [256, 256, 1024], stage=4, block='b', atrous_rate=(2, 2))
    x = identity_block_atrous(x, 3, [256, 256, 1024], stage=4, block='c', atrous_rate=(2, 2))
    x = identity_block_atrous(x, 3, [256, 256, 1024], stage=4, block='d', atrous_rate=(2, 2))
    x = identity_block_atrous(x, 3, [256, 256, 1024], stage=4, block='e', atrous_rate=(2, 2))
    x = identity_block_atrous(x, 3, [256, 256, 1024], stage=4, block='f', atrous_rate=(2, 2))

    # conv_5
    x = conv_block_atrous(x, 3, [512, 512, 2048], stage=5, block='a', atrous_rate=(4, 4))
    x = identity_block_atrous(x, 3, [512, 512, 2048], stage=5, block='b', atrous_rate=(4, 4))
    x = identity_block_atrous(x, 3, [512, 512, 2048], stage=5, block='c', atrous_rate=(4, 4))

    # Create model
    model = Model(img_input, x)

    # Load weights
    weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models', md5_hash='f64f049c92468c9affcd44b0976cdafe')
    model.load_weights(weights_path)

    return model 
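Only conv_1 (stride 2), its max-pooling (stride 2) and conv_3 (stride 2) reduce resolution in the backbone above; conv_4 and conv_5 switch to atrous (dilated) blocks, whose point is to keep the spatial size. Under that assumption the returned feature map is 1/8 of the input on each side; a quick check (the 480 x 640 input is an assumed example):

print(2 * 2 * 2)            # -> 8, the overall downsampling factor
print(480 // 8, 640 // 8)   # e.g. a 480 x 640 input yields a 60 x 80 feature map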
Example 79
Project: 360_aware_saliency   Author: MikhailStartsev   File: dcn_vgg.py    GNU General Public License v3.0 4 votes vote down vote up
def dcn_vgg(input_tensor=None):
    input_shape = (3, None, None)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # conv_1
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # conv_2
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # conv_3
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool', border_mode='same')(x)

    # conv_4
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(1, 1), name='block4_pool', border_mode='same')(x)

    # conv_5
    x = AtrousConvolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1', atrous_rate=(2, 2))(x)
    x = AtrousConvolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2', atrous_rate=(2, 2))(x)
    x = AtrousConvolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3', atrous_rate=(2, 2))(x)

    # Create model
    model = Model(img_input, x)

    # Load weights
    weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models')
    model.load_weights(weights_path)

    return model 
Example 80
Project: Visual-Question-Answering   Author: Hynix07   File: models.py    MIT License 4 votes vote down vote up
def VGG_16(weights_path=None):
	model = Sequential()
	model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
	model.add(Convolution2D(64, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(64, 3, 3, activation='relu'))
	model.add(MaxPooling2D((2,2), strides =(2,2)))

	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(128, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(128, 3, 3, activation='relu'))
	model.add(MaxPooling2D((2,2), strides =(2,2)))

	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(256, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(256, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(256, 3, 3, activation='relu'))
	model.add(MaxPooling2D((2,2), strides =(2,2)))

	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(MaxPooling2D((2,2), strides =(2,2)))

	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(ZeroPadding2D((1,1)))
	model.add(Convolution2D(512, 3, 3, activation='relu'))
	model.add(MaxPooling2D((2,2), strides =(2,2)))

	model.add(Flatten())
	model.add(Dense(4096, activation='relu'))
	model.add(Dropout(0.5))
	model.add(Dense(4096, activation='relu'))
	model.add(Dropout(0.5))
	model.add(Dense(1000, activation='softmax'))

	if weights_path:
		model.load_weights(weights_path)
	
	return model
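The VGG_16 model above is the channels-first VGG-16 classifier, so inputs are batches shaped (n, 3, 224, 224) and the output is an (n, 1000) softmax. A hypothetical call sketch (the weights file name is an assumption, not a file shipped with the project):

import numpy as np

dummy_batch = np.zeros((1, 3, 224, 224), dtype='float32')   # channels-first, as in input_shape above
# model = VGG_16('vgg16_weights.h5')    # assumed weights path; omit the argument to build without weights
# preds = model.predict(dummy_batch)    # -> array of shape (1, 1000)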