Python keras.layers.Dropout() Examples

The following are code examples showing how to use keras.layers.Dropout(). They are taken from open source Python projects. You can vote up the examples you like or vote down the ones you don't.

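As a quick orientation before the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the basic pattern: Dropout(rate) randomly sets the given fraction of its inputs to zero during training and is a no-op at inference time.

def minimal_dropout_example():
    # Minimal usage sketch (not from the projects below): Dropout between two Dense layers.
    from keras.models import Sequential
    from keras.layers import Dense, Dropout

    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(20,)))
    model.add(Dropout(0.5))  # randomly drop 50% of the activations, during training only
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
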
Example 1
Project: speed_estimation   Author: NeilNie   File: simple_conv.py    MIT License 13 votes
def commaai_model(self):

        model = Sequential()
        model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3), output_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3)))
        model.add(Conv2D(16, (8, 8), strides=4, padding="same"))
        model.add(ELU())
        model.add(Conv2D(32, (5, 5), strides=2, padding="same"))
        model.add(ELU())
        model.add(Conv2D(64, (5, 5), strides=2, padding="same"))
        model.add(Flatten())
        model.add(Dropout(.2))
        model.add(ELU())
        model.add(Dense(512))
        model.add(Dropout(.5))
        model.add(ELU())
        model.add(Dense(1))

        sgd = SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='mean_squared_error')
        # print('steering model is created and compiled...')
        return model 
Example 2
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: VGG_OutputGen.py    MIT License 7 votes
def getVGGModel():
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = VGG16(weights='imagenet', include_top=False, 
                 input_shape=X_train.shape[1:], classes=1)
    x = base_model.get_layer('block5_pool').output
    

    x = GlobalMaxPooling2D()(x)
    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(512, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    merge_one = Dense(512, activation='relu', name='fc3')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    
    predictions = Dense(1, activation='sigmoid')(merge_one)
    
    model = Model(inputs=[base_model.input, input_2], outputs=predictions)
    
    # adam = Adam(lr=1e-3, epsilon = 1e-8, beta_1 = .9, beta_2 = .999)
    # model.compile(loss='binary_crossentropy',
    #               optimizer=adam,
    #               metrics=['accuracy'])
    return model 
Example 3
Project: Image-Caption-Generator   Author: dabasajay   File: model.py    MIT License 6 votes
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero=True: inputs are zero-padded to the same length; the mask lets downstream layers ignore the padded timesteps.
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
Example 4
Project: ANN   Author: waynezv   File: ANN_v0.1.py    MIT License 6 votes
def train(in_dim, out_dim, X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(100000, input_dim = in_dim, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(100000, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(out_dim, init='uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',\
            metrics=['accuracy'])

    hist = model.fit(X_train, Y_train, nb_epoch=5, batch_size=32,\
            validation_split=0.1, shuffle=True)
    print(hist.history)

    loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)

    classes = model.predict_classes(X_test, batch_size=32)

    proba = model.predict_proba(X_test, batch_size=32) 
Example 5
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes
def embedding_RNN_2_lstm(input_shape, conv=False, dropout=False):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    x = conv_module(conv, input_shape, input)

    if device == 'CPU':
        if dropout:
            x = Bidirectional(LSTM(units=32, return_sequences=True, dropout=dropout))(x)
            x = Bidirectional(LSTM(units=32, return_sequences=False, dropout=dropout))(x)
            x = Dropout(dropout)(x)
        else:
            x = Bidirectional(LSTM(units=32, return_sequences=True))(x)
            x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    return x, input 
Example 6
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes
def embedding_RNN_2_lstm_attention(input_shape, conv, dropout):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    x = conv_module(conv, input_shape, input)

    if device == 'CPU':
        if dropout:
            x = Bidirectional(LSTM(units=32, return_sequences=True, dropout=dropout))(x)
            x = Bidirectional(LSTM(units=32, return_sequences=True, dropout=dropout))(x)
            x = Dropout(dropout)(x)
        else:
            x = Bidirectional(LSTM(units=32, return_sequences=True))(x)
            x = Bidirectional(LSTM(units=32, return_sequences=True))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(x)

    x, attention = Attention(return_attention=True)(x)

    return x, input, attention 
Example 7
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes
def embedding_RNN_2_lstm_1_dense(input_shape):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x, input 
Example 8
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes
def embedding_RNN_2_lstm_2_dense(input_shape):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x, input 
Example 9
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes
def embedding_RNN_3_lstm_1_dense(input_shape):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(LSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x, input 
Example 10
Project: cnn-levelset   Author: wiseodd   File: localizer.py    MIT License 6 votes
def __init__(self, model_path=None):
        if model_path is not None:
            self.model = self.load_model(model_path)
        else:
            # VGG16 last conv features
            inputs = Input(shape=(7, 7, 512))
            x = Convolution2D(128, 1, 1)(inputs)
            x = Flatten()(x)

            # Cls head
            h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_cls = Dropout(p=0.5)(h_cls)
            cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

            # Reg head
            h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_reg = Dropout(p=0.5)(h_reg)
            reg_head = Dense(4, activation='linear', name='reg')(h_reg)

            # Joint model
            self.model = Model(input=inputs, output=[cls_head, reg_head]) 
Example 11
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 6 votes
def cnn_2d(input_shape):

    model = Sequential()
    model.add(Conv2D(100, (3, 3), padding='valid', activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Conv2D(200, (3, 3), padding='valid', activation='relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Flatten())
    model.add(Dense(200, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model 
Example 12
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: zfnet_keras.py    MIT License 6 votes
def ZFNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 13
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: alexnet_keras.py    MIT License 6 votes
def AlexNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (11, 11), padding='valid', strides=4, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 14
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nin_keras.py    MIT License 6 votes
def NIN():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv1')(inputs)
    x = Conv2D(160, (1, 1), padding='same', strides=1, activation='relu', name='cccp1')(x)
    x = Conv2D(96, (1, 1), padding='same', strides=1, activation='relu', name='cccp2')(x)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv2')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp4')(x)
    x = AveragePooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (3, 3), padding='same', strides=1, activation='relu', name='conv3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp5')(x)
    x = Conv2D(num_classes, (1, 1), padding='same', strides=1, activation='relu', name='cccp6')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 15
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    MIT License 6 votes
def build_discriminator(self):

        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(1024)(d_in)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        validity = Dense(1, activation="sigmoid")(model)

        return Model([z, img], validity) 
Example 16
Project: Keras-GAN   Author: eriklindernoren   File: dualgan.py    MIT License 6 votes
def build_generator(self):

        X = Input(shape=(self.img_dim,))

        model = Sequential()
        model.add(Dense(256, input_dim=self.img_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(self.img_dim, activation='tanh'))

        X_translated = model(X)

        return Model(X, X_translated) 
Example 17
Project: Jtyoui   Author: jtyoui   File: cnn_rnn_crf.py    MIT License 6 votes
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(input=[inputs], output=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model 
Example 18
Project: Kickstart-AI   Author: katchu11   File: generative-adversarial-network.py    MIT License 6 votes
def discriminator_builder(depth=64,p=0.4):

    inputs = Input((img_w,img_h,1))

    conv1 = Conv2D(depth*1, 5, strides=2, padding='same', activation='relu')(inputs)
    conv1 = Dropout(p)(conv1)

    conv2 = Conv2D(depth*2, 5, strides=2, padding='same', activation='relu')(conv1)
    conv2 = Dropout(p)(conv2)

    conv3 = Conv2D(depth*4, 5, strides=2, padding='same', activation='relu')(conv2)
    conv3 = Dropout(p)(conv3)

    conv4 = Conv2D(depth*8, 5, strides=1, padding='same', activation='relu')(conv3)
    conv4 = Flatten()(Dropout(p)(conv4))

    output = Dense(1, activation='sigmoid')(conv4)

    model = Model(inputs=inputs, outputs=output)
    model.summary()

    return model


# In[21]: 
Example 19
Project: speed_estimation   Author: NeilNie   File: i3d.py    MIT License 5 votes
def __init__(self, weights_path=None, input_shape=None, dropout_prob=0.0, classes=1):

        """Instantiates the Inflated 3D Inception v1 architecture.

        Optionally loads weights pre-trained on Kinetics. Note that when using TensorFlow,
        Always channel last. The model and the weights are compatible with both TensorFlow. The data format
        convention used by the model is the one specified in your Keras config file. Note that the default
        input frame(image) size for this model is 224x224.

        :param weights_path: one of `None` (random initialization)
        :param input_shape: optional shape tuple, only to be specified if `include_top` is False
            (otherwise the input shape should have exactly 3 inputs channels. NUM_FRAMES should be no
            smaller than 8. The authors used 64 frames per example for training and testing on kinetics
            dataset Width and height should be no smaller than 32. i.e.: `(64, 150, 150, 3)` would be one
            valid value.
        :param dropout_prob: optional, dropout probability applied in dropout layer after global average pooling layer.
            0.0 means no dropout is applied, 1.0 means dropout is applied to all features. Note: Since Dropout is
            applied just before the classification layer, it is only useful when `include_top` is set to True.
        :param classes: For regression (i.e. behavorial cloning) 1 is the default value. optional number of classes
            to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is
            specified.
        """

        self.input_shape = input_shape
        self.dropout_prob = dropout_prob
        self.classes = classes
        self.weight_path = weights_path

        img_input = Input(shape=input_shape)
        self.model = self.create_model(img_input)

        if weights_path:
            self.model.load_weights(weights_path)
            print("loaded weights:" + weights_path) 
Example 20
Project: mtrl-auto-uav   Author: brunapearson   File: mtrl_network.py    MIT License 5 votes
def create_model():
    #Create the convolutional stacks
    input_img = Input(shape=(224,224,3))

    x = Conv2D(16, kernel_size=3, activation='relu')(input_img)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Conv2D(32, kernel_size=3, activation='relu')(x)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Conv2D(64, kernel_size=3, activation='relu')(x)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Flatten()(x)
    x = Dense(500, activation='relu')(x)
    x = Dropout(0.20)(x)
    x = Dense(100, activation='relu')(x)
    x = Dense(20, activation='relu')(x)

    n = Conv2D(16, kernel_size=3, activation='relu')(input_img)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Conv2D(32, kernel_size=3, activation='relu')(n)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Conv2D(64, kernel_size=3, activation='relu')(n)
    n = MaxPooling2D(pool_size=(2,2))(n)
    n = Flatten()(n)
    n = Dense(500, activation='relu')(n)
    #n = Dropout(0.50)(n)
    n = Dense(100, activation='relu')(n)
    n = Dense(20, activation='relu')(n)

    #output
    output_x = Dense(1, activation='linear', name='input_x')(n)
    output_y = Dense(1, activation='linear', name='input_y')(n)
    output_z = Dense(1, activation='linear', name='input_z')(n)

    output_qw = Dense(1, activation='linear', name='input_qw')(x)
    output_qx = Dense(1, activation='linear', name='input_qx')(x)
    output_qy = Dense(1, activation='linear', name='input_qy')(x)
    output_qz = Dense(1, activation='linear', name='input_qz')(x)


    model = Model(inputs=input_img, outputs=[output_x,output_y,output_z,output_qw,output_qx,output_qy,output_qz])
    return model 
Example 21
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes
def create_binary_classification_model():
        from keras.models import Sequential
        from keras.layers import Dense, Dropout

        model = Sequential()
        model.add(Dense(64, input_shape=(30,), activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam')
        return model 
Example 22
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes
def create_categorical_classification_model():
        from keras.models import Sequential
        from keras.layers import Dense, Dropout

        model = Sequential()
        model.add(Dense(64, input_shape=(30,), activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(3, activation='sigmoid'))
        model.compile(loss='categorical_crossentropy', optimizer='adam')
        return model 
Example 23
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0 5 votes
def dropout(layer, layer_in, layerId, tensor=True):
    out = {layerId: Dropout(0.5)}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
Example 24
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes
def test_keras_import(self):
        model = Sequential()
        model.add(Dropout(0.5, input_shape=(64, 10)))
        model.build()
        self.keras_type_test(model, 0, 'Dropout') 
Example 25
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input3'], 'l1': net['Dropout']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = dropout(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Dropout') 
Example 26
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: VGG16_BottleNeck.py    MIT License 5 votes
def getVGGModel():
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = VGG16(weights='imagenet', include_top=False, 
                 input_shape=X_train.shape[1:], classes=1)
    x = base_model.get_layer('block5_pool').output
    

    x = GlobalMaxPooling2D()(x)
    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(512, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    merge_one = Dense(512, activation='relu', name='fc3')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    
    predictions = Dense(1, activation='sigmoid')(merge_one)
    
    model = Model(input=[base_model.input, input_2], output=predictions)
    
    adam = Adam(lr=1e-3, epsilon = 1e-8, beta_1 = .9, beta_2 = .999)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model

################

#Using K-fold Cross Validation with Data Augmentation. 
Example 27
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 5 votes
def generate_model_3():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 28
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 5 votes
def generate_model_4():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 3
    #
    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 29
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 5 votes
def weather_fnn(layers, lr,
            decay, loss, seq_len, 
            input_features, output_features):
    
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    #print(seq_len*input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            #hn = Dropout(0.1)(hn)
    #print(seq_len, output_features)
    #print(hn)
    outputs = Dense(seq_len*output_features, activation='sigmoid', name='output_layer')(hn) # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn 
Example 30
Project: Jetson-RaceCar-AI   Author: ardamavi   File: get_model.py    Apache License 2.0 5 votes
def get_model():
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    conv_1 = Conv2D(32, (4,4), strides=(2,2))(img_inputs)

    conv_2 = Conv2D(32, (4,4), strides=(2,2))(conv_1)

    conv_3 = Conv2D(32, (3,3), strides=(1,1))(conv_2)
    act_3 = Activation('relu')(conv_3)

    pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(act_3)

    flat_1 = Flatten()(pooling_1)

    fc = Dense(32)(flat_1)

    lidar_fc = Dense(32)(lidar_inputs)

    concatenate_layer = concatenate([fc, lidar_fc])

    fc = Dense(10)(concatenate_layer)
    fc = Activation('relu')(fc)
    fc = Dropout(0.5)(fc)

    outputs = Dense(2)(fc)

    outputs = Activation('sigmoid')(outputs)


    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])

    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model 
Example 31
Project: Kaggler   Author: jeongyoonlee   File: categorical.py    MIT License 5 votes
def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
        inputs = []
        num_inputs = []
        embeddings = []
        for i, col in enumerate(cat_cols):

            if not n_uniq[i]:
                n_uniq[i] = X[col].nunique()
            if not n_emb[i]:
                n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

            _input = Input(shape=(1,), name=col)
            _embed = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i], name=col + EMBEDDING_SUFFIX)(_input)
            _embed = Dropout(.2)(_embed)
            _embed = Reshape((n_emb[i],))(_embed)

            inputs.append(_input)
            embeddings.append(_embed)

        if num_cols:
            num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
            merged_input = Concatenate(axis=1)(embeddings + [num_inputs])

            inputs = inputs + [num_inputs]
        else:
            merged_input = Concatenate(axis=1)(embeddings)

        x = BatchNormalization()(merged_input)
        x = Dense(128, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        x = Dense(64, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        output = Dense(1, activation=output_activation)(x)

        model = Model(inputs=inputs, outputs=output)

        return model, n_emb, n_uniq 
Example 32
Project: dac   Author: KBNLresearch   File: models.py    GNU General Public License v3.0 5 votes
def create_model(self):
        '''
        Create new keras model.
        '''
        self.class_weight = {0: 0.25, 1: 0.75}

        model = Sequential()
        model.add(Dense(self.data.shape[1], input_dim=self.data.shape[1],
                        activation='relu', kernel_constraint=maxnorm(3)))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(optimizer='RMSprop', loss='binary_crossentropy',
                      metrics=['accuracy'])
        return model 
Example 33
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes
def embedding_2_lstm_1_dense_base(device, base_input):
    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=True))(base_input)
        x = Bidirectional(LSTM(units=32, return_sequences=False))(x)

    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(base_input)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x 
Example 34
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes
def embedding_1_lstm_1_dense_base(device, base_input):
    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=False))(base_input)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(base_input)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x 
Example 35
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 5 votes
def createModel_schluter_valid(input, num_filter, height_filter, width_filter, filter_density, pool_n_row,
                pool_n_col, dropout):
    """
    original Schluter relu activation, no dropout
    :param input:
    :param num_filter:
    :param height_filter:
    :param width_filter:
    :param filter_density:
    :param pool_n_row:
    :param pool_n_col:
    :param dropout:
    :return:
    """

    x = ZeroPadding2D(padding=(0, int(width_filter/2)),  data_format="channels_first")(input)

    x = Conv2D(int(num_filter * filter_density), (height_filter, width_filter), padding="valid",
                       data_format="channels_first",
                       activation='relu')(x)

    output_shape = K.int_shape(x)

    if pool_n_row == 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], output_shape[3]), padding='same', data_format="channels_first")(x)
    elif pool_n_row == 'all' and pool_n_col != 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], pool_n_col), padding='same', data_format="channels_first")(x)
    elif pool_n_row != 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(pool_n_row, output_shape[3]), padding='same', data_format="channels_first")(x)
    else:
        x = MaxPooling2D(pool_size=(pool_n_row, pool_n_col), padding='same', data_format="channels_first")(x)
    x = Dropout(dropout)(x)
    x = Flatten()(x)

    return x 
Example 36
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 5 votes
def embedding_RNN_1_lstm(input_shape, conv=False, dropout=False):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    x = conv_module(conv, input_shape, input)

    if device == 'CPU':
        if dropout:
            x = Bidirectional(LSTM(units=32, return_sequences=False, dropout=dropout))(x)
            x = Dropout(dropout)(x)
        else:
            x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    return x, input 
Example 37
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 5 votes
def embedding_RNN_3_lstm_3_dense(input_shape):

    device = device_lib.list_local_devices()[0].device_type

    input = Input(batch_shape=input_shape)

    if device == 'CPU':
        x = Bidirectional(LSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(LSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(LSTM(units=32, return_sequences=False))(x)
    else:
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(input)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=True))(x)
        x = Bidirectional(CuDNNLSTM(units=32, return_sequences=False))(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    x = Dense(units=64, activation='relu')(x)

    x = Dropout(rate=0.5)(x)

    return x, input 
Example 38
Project: hepaccelerate   Author: hepaccelerate   File: train_dnn.py    BSD 3-Clause "New" or "Revised" License 5 votes
def layer(din, n_units, do_dropout=False):
    d = Dense(n_units)(din)
    d = LeakyReLU(alpha=0.2)(d)
    if do_dropout:
        d = Dropout(0.2)(d)
    return d 
Example 39
Project: ppi_lstm_rnn_keras   Author: ylhsieh   File: train_keras.py    MIT License 5 votes
def main():

    def build_model():
        model = Sequential()
        model.add(Embedding(len(train_vocab), hidden_size, weights=[embedding_array],\
                            input_length=max_sequence_length))
        model.add(Dropout(dropout_rate))
        model.add(Bidirectional(LSTM(rnn_hidden_size)))
        model.add(Dropout(dropout_rate))
        model.add(Dense(2, activation='softmax'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        return model

    train_vocab = load_vocab_from(opt.data + '.vocab')
    embedding_array = load_pretrained_embeddings(train_vocab, pretrained_embeddings_file)
    for fold_id in range(10):
        tfsession = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)))
        K.set_session(tfsession)
        train_file = 'corpus/{}_f{}_train.txt'.format(opt.data, fold_id)
        test_file = 'corpus/{}_f{}_test.txt'.format(opt.data, fold_id)
        log_file = '{}_f{}.log'.format(opt.data, fold_id)
        x_train, x_test, y_train, y_test, _ = read_corpus(train_file, test_file, train_vocab)
        fscore_cb = FscoreLogCallback(log_file)
        model = build_model()
        print("Fold {}".format(fold_id))
        model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, \
                  callbacks=[fscore_cb], verbose=2)
        predicted = np.argmax(model.predict(x_test), axis=1)
        y_test_to_label = np.argmax(y_test, axis=1)
        prec, reca, fscore, sup = precision_recall_fscore_support(y_test_to_label, predicted, average='binary')
        print("Final Precision:{:2.2f}% Recall:{:2.2f}% Fscore:{:2.2f}%".format(prec*100, reca*100, fscore*100)) 
Example 40
Project: Anamoly-Detection   Author: msmsk05   File: auto_encoder.py    BSD 2-Clause "Simplified" License 5 votes
def _build_model(self):
        model = Sequential()
        # Input layer
        model.add(Dense(
            self.hidden_neurons_[0], activation=self.hidden_activation,
            input_shape=(self.n_features_,),
            activity_regularizer=l2(self.l2_regularizer)))
        model.add(Dropout(self.dropout_rate))

        # Additional layers
        for i, hidden_neurons in enumerate(self.hidden_neurons_, 1):
            model.add(Dense(
                hidden_neurons,
                activation=self.hidden_activation,
                activity_regularizer=l2(self.l2_regularizer)))
            model.add(Dropout(self.dropout_rate))

        # Output layers
        model.add(Dense(self.n_features_, activation=self.output_activation,
                        activity_regularizer=l2(self.l2_regularizer)))

        # Compile model
        model.compile(loss=self.loss, optimizer=self.optimizer)
        print(model.summary())
        return model

    # noinspection PyUnresolvedReferences 
Example 41
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg16_keras.py    MIT License 5 votes
def VGG16():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_1')(inputs)
    x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 42
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: easy_keras.py    MIT License 5 votes
def Mynet():
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 43
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: main_keras.py    MIT License 5 votes
def Mynet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_1')(inputs)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_1')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Flatten()(x)
    x = Dense(1024, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 44
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    MIT License 5 votes
def build_discriminator(self):

        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())

        model.summary()

        img = Input(shape=self.img_shape)

        features = model(img)
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes+1, activation="softmax")(features)

        return Model(img, [valid, label]) 
Example 45
Project: Keras-GAN   Author: eriklindernoren   File: context_encoder.py    MIT License 5 votes
def build_generator(self):


        model = Sequential()

        # Encoder
        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))

        # Decoder
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation('tanh'))

        model.summary()

        masked_img = Input(shape=self.img_shape)
        gen_missing = model(masked_img)

        return Model(masked_img, gen_missing) 
Example 46
Project: Keras-GAN   Author: eriklindernoren   File: ccgan.py    MIT License 5 votes
def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            u = Concatenate()([u, skip_input])
            return u

        img = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(img, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(img, output_img) 
Example 47
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    MIT License 5 votes
def build_disk_and_q_net(self):

        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        img_embedding = model(img)

        # Discriminator
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)

        # Return discriminator and recognition network
        return Model(img, validity), Model(img, label) 
Example 48
Project: Keras-GAN   Author: eriklindernoren   File: wgan.py    MIT License 5 votes
def build_critic(self):

        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity) 
Example 49
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License 5 votes
def build_critic(self):

        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity) 
Example 50
Project: Keras-GAN   Author: eriklindernoren   File: cyclegan.py    MIT License 5 votes
def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(d0, output_img) 
Example 51
Project: Keras-GAN   Author: eriklindernoren   File: dcgan.py    MIT License 5 votes
def build_discriminator(self):

        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity) 
Example 52
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: load_model.py    Apache License 2.0 5 votes
def load_model():

    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
    
    tensor_in = Input((60, 200, 3))
    out = tensor_in
    out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Flatten()(out)
    out = Dropout(0.5)(out)
    out = [Dense(37, name='digit1', activation='softmax')(out),\
        Dense(37, name='digit2', activation='softmax')(out),\
        Dense(37, name='digit3', activation='softmax')(out),\
        Dense(37, name='digit4', activation='softmax')(out),\
        Dense(37, name='digit5', activation='softmax')(out),\
        Dense(37, name='digit6', activation='softmax')(out)]
    
    model = Model(inputs=tensor_in, outputs=out)
    
    # Define the optimizer
    model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=['accuracy'])
    if 'Windows' in platform.platform():
        model.load_weights('{}\\cnn_weight\\verificatioin_code.h5'.format(PATH)) 
    else:
        model.load_weights('{}/cnn_weight/verificatioin_code.h5'.format(PATH)) 
    
    return model 
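A hedged usage sketch for the model above: predict() returns a list of six (batch, 37) softmax arrays, one per character position, so decoding a verification code means taking the argmax of each head. `ALPHABET` and `imgs` below are assumptions for illustration (a 37-symbol lookup string and a preprocessed image batch), not part of the original project, and the weight file referenced inside load_model() must be available:

import numpy as np

ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ '  # hypothetical 37-symbol lookup table

model = load_model()
preds = model.predict(imgs)  # imgs: float array of shape (n, 60, 200, 3)
codes = [''.join(ALPHABET[np.argmax(head[i])] for head in preds) for i in range(len(imgs))]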
Example 53
Project: EUSIPCO2017   Author: Veleslavia   File: singlelayer.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]
    maxpool_const = 4

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
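The convolution calls above use the Keras 1 API (`border_mode`, `init`, `W_regularizer`, positional kernel sizes), which later Keras versions removed. A hedged sketch of the equivalent Keras 2 call, reusing the names from the loop above:

from keras.layers import Conv2D
from keras.regularizers import l2

x = Conv2D(n_filters[i], (m_i, n_i),
           padding='same',
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-5),
           name=str(n_i) + '_' + str(m_i) + '_conv')(melgram_input)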
Example 54
Project: ocr_svc   Author: daveshap   File: keras_alphanumeric_model.py    MIT License 5 votes vote down vote up
def instantiate_model():
    print('COMPILING MODEL')
    model = Sequential()
    model.add(Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', input_shape=input_shape))
    model.add(Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model 
Example 55
Project: Kickstart-AI   Author: katchu11   File: generative-adversarial-network.py    MIT License 5 votes vote down vote up
def generator_builder(z_dim=100,depth=64,p=0.4):

    inputs = Input((z_dim,))

    dense1 = Dense(7*7*64)(inputs)
    dense1 = BatchNormalization(momentum=0.9)(dense1)
    dense1 = Activation(activation='relu')(dense1)
    dense1 = Reshape((7,7,64))(dense1)
    dense1 = Dropout(p)(dense1)

    conv1 = UpSampling2D()(dense1)
    conv1 = Conv2DTranspose(int(depth/2), kernel_size=5, padding='same', activation=None,)(conv1)
    conv1 = BatchNormalization(momentum=0.9)(conv1)
    conv1 = Activation(activation='relu')(conv1)

    conv2 = UpSampling2D()(conv1)
    conv2 = Conv2DTranspose(int(depth/4), kernel_size=5, padding='same', activation=None,)(conv2)
    conv2 = BatchNormalization(momentum=0.9)(conv2)
    conv2 = Activation(activation='relu')(conv2)

    conv3 = Conv2DTranspose(int(depth/8), kernel_size=5, padding='same', activation=None,)(conv2)
    conv3 = BatchNormalization(momentum=0.9)(conv3)
    conv3 = Activation(activation='relu')(conv3)

    output = Conv2D(1, kernel_size=5, padding='same', activation='sigmoid')(conv3)

    model = Model(inputs=inputs, outputs=output)
    model.summary()

    return model


Example 56
Project: speed_estimation   Author: NeilNie   File: simple_conv.py    MIT License 4 votes vote down vote up
def __init__(self, input_shape, weights_path=None, classes=1):


        '''Instantiates the Inflated 3D Inception v1 architecture.

        Optionally loads weights pre-trained on Kinetics. Note that when using TensorFlow
        the data format is always channels last. The model and the weights are compatible
        with TensorFlow; the data format convention used by the model is the one
        specified in your Keras config file.
        Note that the default input frame (image) size for this model is 224x224.

        :param weights_path: one of `None` (random initialization)
        :param input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape should have exactly
                3 input channels). NUM_FRAMES should be no smaller than 8; the authors
                used 64 frames per example for training and testing on the Kinetics dataset.
                Width and height should be no smaller than 32,
                i.e.: `(64, 150, 150, 3)` would be one valid value.
        :param dropout_prob: optional, dropout probability applied in dropout layer
                after global average pooling layer.
                0.0 means no dropout is applied, 1.0 means dropout is applied to all features.
                Note: Since Dropout is applied just before the classification
                layer, it is only useful when `include_top` is set to True.
        :param endpoint_logit: (boolean) optional. If True, the model's forward pass
                will end at producing logits. Otherwise, softmax is applied after producing
                the logits to produce the class probabilities prediction. Setting this parameter
                to True is particularly useful when you want to combine results of rgb model
                and optical flow model.
                - `True` end model forward pass at logit output
                - `False` go further after logit to produce softmax predictions
                Note: This parameter is only useful when `include_top` is set to True.
        :param classes: optional number of classes to classify images into, only to be
                specified if `include_top` is True and no `weights` argument is given.
                For regression (i.e. behavioral cloning) the default value is 1.

        '''
        self.classes = classes
        self.weight_path = weights_path

        img_input = Input(shape=input_shape)
        self.model = self.commaai_model()

        if weights_path:
            self.model = load_model(weights_path)
            print("loaded weights:" + weights_path) 
Example 57
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo3.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 8, 8,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 6, 6,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 4, 4,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 2, 2,init=weight_init, name='conv1_4'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 208, out 104
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 8, 8,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 6, 6,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 4, 4,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 2, 2,init=weight_init, name='conv2_4'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 88, out is 44 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(220, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 58
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dropout(dropout))  # note: this duplicates the Dropout two lines above; kept as in the original source
	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 59
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 5, 5,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 116, out 58
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 52, out is 26 
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 20, out is 10 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(10, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 60
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_two.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(16, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(16, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))  
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dropout(dropout))  # note: this duplicates the Dropout two lines above; kept as in the original source
	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 61
Project: MODS_ConvNet   Author: santiagolopezg   File: big_hipster.py    MIT License 4 votes vote down vote up
def cifar(): #maybe change border mode, idk; also maybe add BatchNormalization(axis=1)

    # Determine proper input shape
    K.set_image_dim_ordering('th')
    input_shape = (1, 256, 192)
    img_input = Input(shape=input_shape)

    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_1')(img_input)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_2')(x)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_3')(x)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
    x = Dropout(0.25)(x)

    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_2')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_3')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_4')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)
    x = Dropout(0.25)(x)

    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_1')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_2')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_3')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_4')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_5')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)
    x = Dropout(0.25)(x)

    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_2')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_3')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_4')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_5')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_6')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)
    x = Dropout(0.25)(x)

    x = Flatten(name='flatten')(x)
    x = Dense(1000, activation='relu', name='fc1')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation='relu', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Dense(2, activation='softmax', name='pred')(x)

    # Create model.
    model = Model(img_input, x)

    #weights=''
    #model.load_weights(weights)

    return model 
Example 62
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo2.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	#img_input = Input(shape=input_shape)

	model = Sequential()

	model.add(Convolution2D(32, 7, 7,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 5, 5,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in 212, out 106
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 7, 7,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 5, 5,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2))) # in is 94, out is 47 
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(220, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model 
Example 63
Project: Keras-Unet   Author: MLearing   File: unet.py    GNU General Public License v2.0 4 votes vote down vote up
def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 1))

        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)  # pool1 = MaxPooling2D()(b) means tensor b is the input; the later pooling layers follow the same pattern
        
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(drop5))
        merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)


        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        merge8 = merge([conv2, up8], mode='concat', concat_axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        merge9 = merge([conv1, up9], mode='concat', concat_axis=3)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        print('model compile')
        return model 
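The skip connections above rely on the Keras 1 `merge([...], mode='concat', concat_axis=3)` function, which was removed in Keras 2. A hedged sketch of the equivalent call with `concatenate`, using the same tensors as the first merge above:

from keras.layers import concatenate

merge6 = concatenate([drop4, up6], axis=3)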
Example 64
Project: Scene-Understanding   Author: foamliu   File: vgg16.py    MIT License 4 votes vote down vote up
def vgg16_model(img_rows, img_cols, channel=3):
    model = Sequential()
    # Encoder
    model.add(ZeroPadding2D((1, 1), input_shape=(img_rows, img_cols, channel), name='input'))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add Fully Connected Layer
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='dense2'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax', name='softmax'))

    # Loads ImageNet pre-trained data
    weights_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    return model 
Example 65
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 66
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 10

    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    #ip1 = K.reshape(ip,shape=(MAX_TIMESTEPS,MAX_NB_VARIABLES))
    #x = Permute((2, 1))(ip)
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 67
Project: dac   Author: KBNLresearch   File: models.py    GNU General Public License v3.0 4 votes vote down vote up
def create_model(self):
        '''
        Create new keras model.
        '''
        self.class_weight = {0: 0.25, 1: 0.75}

        # Entity branch
        entity_inputs = Input(shape=(self.data[0].shape[1],))
        entity_x = Dense(self.data[0].shape[1], activation='relu',
                         kernel_constraint=maxnorm(3))(entity_inputs)
        entity_x = Dropout(0.25)(entity_x)
        # entity_x = Dense(50, activation='relu',
        #                  self.kernel_constraint=maxnorm(3))(entity_x)
        # entity_x = Dropout(0.25)(entity_x)

        # Candidate branch
        candidate_inputs = Input(shape=(self.data[1].shape[1],))
        candidate_x = Dense(self.data[1].shape[1], activation='relu',
                            kernel_constraint=maxnorm(3))(candidate_inputs)
        candidate_x = Dropout(0.25)(candidate_x)
        # candidate_x = Dense(50, activation='relu',
        #                     kernel_constraint=maxnorm(3))(candidate_x)
        # candidate_x = Dropout(0.25)(candidate_x)

        # Cosine proximity
        # cos_x = dot([entity_x, candidate_x], axes=1, normalize=False)
        # cos_x = concatenate([entity_x, candidate_x])
        # cos_output = Dense(1, activation='sigmoid')(cos_x)

        # Match branch
        match_inputs = Input(shape=(self.data[2].shape[1],))
        match_x = Dense(self.data[1].shape[1], activation='relu',
                        kernel_constraint=maxnorm(3))(match_inputs)
        match_x = Dropout(0.25)(match_x)

        # Merge
        x = concatenate([entity_x, candidate_x, match_x])
        x = Dense(32, activation='relu', kernel_constraint=maxnorm(3))(x)
        x = Dropout(0.25)(x)
        x = Dense(16, activation='relu', kernel_constraint=maxnorm(3))(x)
        x = Dropout(0.25)(x)
        x = Dense(8, activation='relu', kernel_constraint=maxnorm(3))(x)
        x = Dropout(0.25)(x)

        predictions = Dense(1, activation='sigmoid')(x)

        model = Model(inputs=[entity_inputs, candidate_inputs, match_inputs],
                      outputs=predictions)
        model.compile(optimizer='RMSprop', loss='binary_crossentropy',
                      metrics=['accuracy'])

        return model 
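Because the model above has three Input branches, training requires a list of arrays in the same order as the inputs. A hedged usage sketch from inside the same class; `self.labels` (the binary targets) and the epoch/batch settings are assumptions for illustration, not taken from the project:

model = self.create_model()
model.fit([self.data[0], self.data[1], self.data[2]], self.labels,
          epochs=20, batch_size=32,
          class_weight=self.class_weight,
          validation_split=0.1)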
Example 68
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def jan_original(filter_density, dropout, input_shape, batchNorm=False, dense_activation='relu', channel=1):
    if channel == 1:
        reshape_dim = (1, input_shape[0], input_shape[1])
        channel_order = 'channels_first'
    else:
        reshape_dim = input_shape
        channel_order = 'channels_last'

    model_1 = Sequential()

    if batchNorm:
        model_1.add(BatchNormalization(axis=1, input_shape=reshape_dim))

    model_1.add(Conv2D(int(10 * filter_density), (3, 7), padding="valid",
                       input_shape=reshape_dim,
                       data_format=channel_order, activation='relu'))
    model_1.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    model_1.add(Conv2D(int(20 * filter_density), (3, 3), padding="valid",
                       data_format=channel_order, activation='relu'))
    model_1.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    if dropout:
        model_1.add(Dropout(dropout))  # test Schluter dataset, comment in jingju dataset

    model_1.add(Flatten())

    model_1.add(Dense(units=256, activation=dense_activation))
    # model_1.add(ELU())

    if dropout:
        model_1.add(Dropout(dropout))

    model_1.add(Dense(1, activation='sigmoid'))
    # model_1.add(Activation("softmax"))

    # optimizer = SGD(lr=0.05, momentum=0.45, decay=0.0, nesterov=False)
    optimizer = Adam()

    model_1.compile(loss='binary_crossentropy',
                    optimizer=optimizer,
                    metrics=['accuracy'])

    print(model_1.summary())

    return model_1 
Example 69
Project: ndparse   Author: neurodata   File: nddl.py    Apache License 2.0 4 votes vote down vote up
def ciresan_n3(n=65, nOutput=2):
    """An approximation of the N3 network from [1].
    Note that we also made a few small modifications along the way
    (from Theano to caffe and now to tensorflow/keras).

    As of this writing, no serious attempt has been made to optimize
    hyperparameters or structure of this network.

    Parameters:
       n : The tile size (diameter) to use in the sliding window.
           Tiles are assumed to be square, hence only one parameter.

    [1] Ciresan et al 'Deep neural networks segment neuronal membranes in
        electron microscopy images,' NIPS 2012.
    """

    from keras.optimizers import SGD
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization


    model = Sequential()

    # input: nxn images with 1 channel -> (1, n, n) tensors.
    # this applies 48 convolution filters of size 5x5 each.
    model.add(Convolution2D(48, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, n, n)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...

    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...
    #model.add(Dropout(0.25))

    model.add(Convolution2D(48, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(200))
    model.add(Activation('relu'))
    #model.add(Dropout(0.5))

    model.add(Dense(nOutput))  # use 2 for binary classification
    model.add(Activation('softmax'))

    return model


#-------------------------------------------------------------------------------
#  Code for training a deep learning network
#------------------------------------------------------------------------------- 
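The banner above marks where the training driver begins in the source file. As a hedged sketch of how the N3-style network could be compiled for binary membrane segmentation (the optimizer settings are assumptions, not values from the project):

from keras.optimizers import SGD

model = ciresan_n3(n=65, nOutput=2)
model.compile(optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])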
Example 70
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 4 votes vote down vote up
def cnn_3d(input_shape):

    model = Sequential()
    model.add(Conv3D(16, kernel_size=(3, 3, 20), strides=(1, 1, 10), padding='valid', kernel_regularizer=l2(REG_lambda), input_shape=input_shape))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(16, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


# 2D-CNN model 
Example 71
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bn_keras.py    MIT License 4 votes vote down vote up
def VGG16():
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i+1))(x)  # i+1 keeps layer names consistent with the earlier blocks
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 72
Project: Keras-GAN   Author: eriklindernoren   File: discogan.py    MIT License 4 votes vote down vote up
def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4, normalize=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalize:
                d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf, normalize=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        d5 = conv2d(d4, self.gf*8)
        d6 = conv2d(d5, self.gf*8)
        d7 = conv2d(d6, self.gf*8)

        # Upsampling
        u1 = deconv2d(d7, d6, self.gf*8)
        u2 = deconv2d(u1, d5, self.gf*8)
        u3 = deconv2d(u2, d4, self.gf*8)
        u4 = deconv2d(u3, d3, self.gf*4)
        u5 = deconv2d(u4, d2, self.gf*2)
        u6 = deconv2d(u5, d1, self.gf)

        u7 = UpSampling2D(size=2)(u6)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1,
                            padding='same', activation='tanh')(u7)

        return Model(d0, output_img) 
Example 73
Project: Keras-GAN   Author: eriklindernoren   File: pix2pix.py    MIT License 4 votes vote down vote up
def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        d5 = conv2d(d4, self.gf*8)
        d6 = conv2d(d5, self.gf*8)
        d7 = conv2d(d6, self.gf*8)

        # Upsampling
        u1 = deconv2d(d7, d6, self.gf*8)
        u2 = deconv2d(u1, d5, self.gf*8)
        u3 = deconv2d(u2, d4, self.gf*8)
        u4 = deconv2d(u3, d3, self.gf*4)
        u5 = deconv2d(u4, d2, self.gf*2)
        u6 = deconv2d(u5, d1, self.gf)

        u7 = UpSampling2D(size=2)(u6)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)

        return Model(d0, output_img) 
Example 74
Project: ismir2018-artist   Author: jongpillee   File: model.py    MIT License 4 votes vote down vote up
def model_basic(num_frame,num_sing):
	pos_anchor = Input(shape = (num_frame,128))

	# item model **audio**
	conv1 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn1 = BatchNormalization()
	activ1 = Activation('relu')
	MP1 = MaxPool1D(pool_size=4)
	conv2 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn2 = BatchNormalization()
	activ2 = Activation('relu')
	MP2 = MaxPool1D(pool_size=4)
	conv3 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn3 = BatchNormalization()
	activ3 = Activation('relu')
	MP3 = MaxPool1D(pool_size=4)
	conv4 = Conv1D(128,2,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn4 = BatchNormalization()
	activ4 = Activation('relu')
	MP4 = MaxPool1D(pool_size=2)
	conv5 = Conv1D(256,1,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn5 = BatchNormalization()
	activ5 = Activation('relu')
	drop1 = Dropout(0.5)

	item_sem = GlobalAvgPool1D()
	
	# pos anchor
	pos_anchor_conv1 = conv1(pos_anchor)
	pos_anchor_bn1 = bn1(pos_anchor_conv1)
	pos_anchor_activ1 = activ1(pos_anchor_bn1)
	pos_anchor_MP1 = MP1(pos_anchor_activ1)
	pos_anchor_conv2 = conv2(pos_anchor_MP1)
	pos_anchor_bn2 = bn2(pos_anchor_conv2)
	pos_anchor_activ2 = activ2(pos_anchor_bn2)
	pos_anchor_MP2 = MP2(pos_anchor_activ2)
	pos_anchor_conv3 = conv3(pos_anchor_MP2)
	pos_anchor_bn3 = bn3(pos_anchor_conv3)
	pos_anchor_activ3 = activ3(pos_anchor_bn3)
	pos_anchor_MP3 = MP3(pos_anchor_activ3)
	pos_anchor_conv4 = conv4(pos_anchor_MP3)
	pos_anchor_bn4 = bn4(pos_anchor_conv4)
	pos_anchor_activ4 = activ4(pos_anchor_bn4)
	pos_anchor_MP4 = MP4(pos_anchor_activ4)
	pos_anchor_conv5 = conv5(pos_anchor_MP4)
	pos_anchor_bn5 = bn5(pos_anchor_conv5)
	pos_anchor_activ5 = activ5(pos_anchor_bn5)
	pos_anchor_sem = item_sem(pos_anchor_activ5)

	output = Dense(num_sing, activation='softmax')(pos_anchor_sem)
	model = Model(inputs = pos_anchor, outputs = output)
	return model 
Example 75
Project: chartAnalyst   Author: huima58   File: chart_analyst.py    Apache License 2.0 4 votes vote down vote up
def predict(train_imgs, train_labels, test_imgs, test_labels, x_pix_num=x_pix_num_default, y_pix_num=y_pix_num_default,
            use_saved_weights=False, weights_file_name=''):
    model = Sequential()
    # use partial VGG16 model
    model.add(ZeroPadding2D((1, 1), input_shape=(1, y_pix_num, x_pix_num)))
    
    base_filter_num = 64
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # added this layer to reduce the input size
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num * 2, 3, 3, activation='relu', name='conv2_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num *2, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128, init='uniform', activation='tanh'))
    model.add(Dropout(0.25))
    model.add(Dense(64, init='uniform', activation='tanh'))
    model.add(Dense(3, init='uniform', activation='softmax'))
    
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if use_saved_weights:
        model.load_weights(weights_file_name)  #need to install h5py
    else:
        start_time = datetime.today()
        checkpointer = ModelCheckpoint(filepath=weights_file_name, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        earlyStopping = EarlyStopping(monitor='val_acc', patience=10, verbose=1, mode='max')
        model.fit(train_imgs, train_labels,
                        nb_epoch=30,
                        verbose=1,
                        batch_size=70,
                        validation_split=0.1,
                        callbacks=[checkpointer, earlyStopping])
        model.load_weights(weights_file_name)
        end_time = datetime.today()
        print "----trained time is from " + str(start_time) + " to " + str(end_time)
        
    predict_rst = model.predict_proba(test_imgs, verbose=0)
    return predict_rst 
Example 76
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: build_verification_code_cnn_model.py    Apache License 2.0 4 votes vote down vote up
def train_verification_model(self):

        def build_cnn_model():

            tensor_in = Input((60, 200, 3))
            tensor_out = tensor_in
            tensor_out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
            tensor_out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            tensor_out = Dropout(0.25)(tensor_out)
            
            tensor_out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(tensor_out)
            tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
            
            Dense(1024, activation="relu")  # note: this layer is created here but never connected to the graph in the original source
            
            tensor_out = Flatten()(tensor_out)
            tensor_out = Dropout(0.5)(tensor_out)
            tensor_out = [Dense(37, name='digit1', activation='softmax')(tensor_out),\
                Dense(37, name='digit2', activation='softmax')(tensor_out),\
                Dense(37, name='digit3', activation='softmax')(tensor_out),\
                Dense(37, name='digit4', activation='softmax')(tensor_out),\
                Dense(37, name='digit5', activation='softmax')(tensor_out),\
                Dense(37, name='digit6', activation='softmax')(tensor_out)]
            model = Model(inputs=tensor_in, outputs=tensor_out)
            

            return model

        model = build_cnn_model()
        #===============================================================
        optimizer = RMSprop(lr=1e-3, rho=0.8, epsilon=1e-08, decay=0.0)
        # Adamax
        # Define the optimizer
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        # model.summary()

        history = model.fit(self.train_data,self.train_labels, 
                            batch_size = 512, epochs=20, verbose=1, 
                            validation_data=(self.test_data,self.test_labels) )
        
        self.model = model
        self.history = history
        ( self.train_correct3 , self.test_correct3, 
          self.train_final_score, self.test_final_score ) = self.compare_val_train_error()
#------------------------------------------------------------------- 
Example 77
Project: EUSIPCO2017   Author: Veleslavia   File: multilayer.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    maxpool_const = 4
    m_sizes = [5, 80]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS/maxpool_const, SEGMENT_DUR/maxpool_const),
                             name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(x)

    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Example 78
Project: dsl-char-cnn   Author: boknilev   File: cnn_multifilter_cv.py    MIT License 4 votes vote down vote up
def make_model(maxlen, alphabet_size, embedding_dims, embedding_droupout,
               nb_filters, filter_lengths, hidden_dims, fc_dropout, 
               num_classes):
    print('Build model...')
    main_input = Input(shape=(maxlen,))
    
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    embedding_layer = Embedding(alphabet_size,
                        embedding_dims,
                        input_length=maxlen,
                        dropout=embedding_droupout)
    embedded = embedding_layer(main_input)
    
    # we add a Convolution1D for each filter length, which will learn nb_filters[i]
    # word group filters of size filter_lengths[i]:
    convs = []
    for i in xrange(len(nb_filters)):
        conv_layer = Convolution1D(nb_filter=nb_filters[i],
                            filter_length=filter_lengths[i],
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1)
        conv_out = conv_layer(embedded)
        # we use max pooling:
        conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
        # We flatten the output of the conv layer,
        # so that we can concat all conv outpus and add a vanilla dense layer:
        conv_out = Flatten()(conv_out)
        convs.append(conv_out)
    
    # concat all conv outputs
    x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
    #concat = BatchNormalization()(concat)
    
    # We add a vanilla hidden layer:
    x = Dense(hidden_dims)(x)
    x = Dropout(fc_dropout)(x)
    x = Activation('relu')(x)
    
    # We project onto number of classes output layer, and squash it with a softmax:
    main_output = Dense(num_classes, activation='softmax')(x)
    
    # finally, define the model 
    model = Model(input=main_input, output=main_output)
    model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    return model 
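A hedged usage sketch for make_model above, with purely illustrative hyperparameters (none of these values come from the project, and the snippet targets the same Python 2 / Keras 1 environment as the original, note e.g. `xrange` and the old merge API):

model = make_model(maxlen=200, alphabet_size=64, embedding_dims=32,
                   embedding_droupout=0.2,  # keeps the original signature's spelling
                   nb_filters=[50, 50, 50], filter_lengths=[2, 3, 4],
                   hidden_dims=128, fc_dropout=0.5, num_classes=6)
model.summary()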
Example 79
Project: surface-crack-detection   Author: Khoronus   File: unet.py    MIT License 4 votes vote down vote up
def model(weights_input=None):

    inputs = Input(IMAGE_SIZE)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(inputs)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool1)
    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool2)
    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool3)
    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool4)
    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(drop5))
    merge6 = Concatenate(axis=3)([drop4,up6])
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge6)
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv6)

    up7 = Conv2D(256, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv6))
    merge7 = Concatenate(axis=3)([conv3,up7])
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge7)
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv7)

    up8 = Conv2D(128, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv7))
    merge8 = Concatenate(axis=3)([conv2,up8])
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge8)
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv8)

    up9 = Conv2D(64, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv8))
    merge9 = Concatenate(axis=3)([conv1,up9])
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge9)
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)
    conv9 = Conv2D(2, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)

    # 1x1 convolution with sigmoid activation: per-pixel crack probability map
    conv10 = Conv2D(1, 1, activation="sigmoid")(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss="binary_crossentropy", metrics=["accuracy"])

    if weights_input:
        model.load_weights(weights_input)

    return model 
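
A minimal usage sketch for the builder above (not part of the original unet.py): IMAGE_SIZE is defined elsewhere in the project, so the 256x256 single-channel shape below is only an assumed placeholder, and the random arrays merely stand in for real crack images and binary masks.

# Usage sketch with dummy data; the import path and input shape are assumptions.
import numpy as np
from unet import model as build_unet      # assumes the file above is importable as unet.py

unet = build_unet()                       # pass a weights path, e.g. build_unet('unet.h5'), to resume

h, w, c = 256, 256, 1                     # placeholder for the project's IMAGE_SIZE
images = np.random.rand(4, h, w, c).astype('float32')         # dummy input batch
masks = (np.random.rand(4, h, w, 1) > 0.5).astype('float32')  # dummy binary crack masks

unet.fit(images, masks, batch_size=2, epochs=1)
probs = unet.predict(images)              # per-pixel crack probabilities in [0, 1]
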
Example 80
Project: OpenBottle   Author: xiaozhuchacha   File: openbottle.py    MIT License 4 votes vote down vote up
def main():
    index_name = ['end', 'ignore', 'approach', 'move', 'grasp_left', 'grasp_right', 'ungrasp_left', 'ungrasp_right',
                  'twist', 'push', 'neutral', 'pull', 'pinch', 'unpinch']

    training_data, training_current_action, training_next_action, testing_data, testing_current_action, testing_next_action = load_data(index_name)

    # model = Sequential()
    # model.add(Dense(64, input_dim=training_data.shape[1]+len(index_name), activation='relu'))
    # model.add(Dropout(0.3))
    # model.add(Dense(64, activation='relu'))
    # model.add(Dropout(0.3))
    # model.add(Dense(len(index_name), activation='softmax'))
    #
    # model.compile(loss='categorical_crossentropy',
    #               optimizer='adadelta',
    #               metrics=['accuracy'])
    #
    # model.fit(np.hstack((training_data, training_current_action)),
    #           training_next_action,
    #           nb_epoch=500,
    #           validation_split=0.2,
    #           batch_size=16)  # starts training
    #
    # model.save('model1.h5')
    # score = model.fit(np.hstack((testing_data, testing_current_action)), testing_next_action, batch_size=16)
    # print score

    # first branch: four-layer MLP with Dropout over the input feature vector
    left_branch = Sequential()
    left_branch.add(Dense(64, input_dim=training_data.shape[1], activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))
    left_branch.add(Dropout(0.3))
    left_branch.add(Dense(64, activation='relu'))

    # second branch: small dense encoding of the one-hot current action
    right_branch = Sequential()
    right_branch.add(Dense(8, input_dim=len(index_name), activation='relu'))

    # Keras 1 Merge layer: concatenates the outputs of the two branches
    merged = Merge([left_branch, right_branch], mode='concat')

    model = Sequential()
    model.add(merged)
    model.add(Dense(32, activation='relu'))
    # model.add(Dense(32, activation='relu'))
    model.add(Dense(14, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    model.fit([training_data, training_current_action],
              training_next_action,
              nb_epoch=20000,
              validation_split=0.2,
              batch_size=16)  # starts training
    model.save('model2.h5')
    score = model.evaluate([testing_data, testing_current_action], testing_next_action, batch_size=16)
    print(score)
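
This example also predates Keras 2: the Merge layer no longer exists and fit takes epochs rather than nb_epoch. As a hedged sketch, the same two-branch next-action classifier could be expressed with the Keras 2 functional API as follows; the feature dimension is an assumed placeholder, since the real one comes from load_data(), while the 14 output classes match index_name above:

# Sketch only: two-branch next-action classifier in the Keras 2 functional API.
# n_features is an assumed placeholder; n_actions matches len(index_name) above.
from keras.layers import Input, Dense, Dropout, concatenate
from keras.models import Model

n_features, n_actions = 120, 14

feature_input = Input(shape=(n_features,))        # branch 1: continuous feature vector
x = Dense(64, activation='relu')(feature_input)
x = Dropout(0.3)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(64, activation='relu')(x)

action_input = Input(shape=(n_actions,))          # branch 2: one-hot current action
a = Dense(8, activation='relu')(action_input)

merged = concatenate([x, a])                      # replaces the Keras 1 Merge layer
hidden = Dense(32, activation='relu')(merged)
next_action = Dense(n_actions, activation='softmax')(hidden)

model = Model(inputs=[feature_input, action_input], outputs=next_action)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# model.fit([training_data, training_current_action], training_next_action,
#           epochs=20000, validation_split=0.2, batch_size=16)
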