Python keras.layers.Dense() Examples

The following are code examples for showing how to use keras.layers.Dense(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: speed_estimation   Author: NeilNie   File: simple_conv.py    MIT License 13 votes vote down vote up
def commaai_model(self):
    """Build and compile the comma.ai-style speed-estimation CNN.

    Returns a compiled Sequential model mapping one RGB frame of shape
    (configs.IMG_HEIGHT, configs.IMG_WIDTH, 3) to a single scalar,
    trained with SGD on mean squared error.
    """
    net = Sequential()
    # Normalize uint8 pixel values from [0, 255] into [-1, 1].
    net.add(Lambda(lambda frame: frame / 127.5 - 1.,
                   input_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3),
                   output_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3)))
    # First two conv stages each end with an ELU activation.
    for n_filters, kernel, stride in ((16, (8, 8), 4), (32, (5, 5), 2)):
        net.add(Conv2D(n_filters, kernel, strides=stride, padding="same"))
        net.add(ELU())
    # Final conv stage feeds the dense head directly (no ELU before Flatten).
    net.add(Conv2D(64, (5, 5), strides=2, padding="same"))
    net.add(Flatten())
    net.add(Dropout(.2))
    net.add(ELU())
    net.add(Dense(512))
    net.add(Dropout(.5))
    net.add(ELU())
    net.add(Dense(1))

    optimizer = SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(optimizer=optimizer, loss='mean_squared_error')
    return net
Example 2
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 9 votes vote down vote up
def create_model(time_window_size, metric):
    """Build a Conv1D + LSTM regressor over univariate time windows.

    The network maps a (time_window_size, 1) window through a conv/pool
    front end and an LSTM, then reconstructs all time steps with a linear
    Dense layer. Compiled with Adam on MSE; `metric` is tracked as the
    single evaluation metric.
    """
    net = Sequential()
    net.add(Conv1D(filters=256, kernel_size=5, padding='same',
                   activation='relu', input_shape=(time_window_size, 1)))
    net.add(MaxPooling1D(pool_size=4))
    net.add(LSTM(64))
    net.add(Dense(units=time_window_size, activation='linear'))

    net.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])

    print(net.summary())
    return net
Example 3
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: VGG_OutputGen.py    MIT License 7 votes vote down vote up
def getVGGModel():
    """Assemble a VGG16-based binary classifier with an auxiliary scalar
    'angle' input merged into the dense head.

    Returns an uncompiled Model taking [image, angle] and producing a
    single sigmoid probability.

    NOTE(review): relies on a module-level X_train for the image input
    shape — confirm it is defined before this function is called.
    """
    angle_input = Input(shape=[1], name="angle")
    angle_feat = Dense(1, )(angle_input)
    base_model = VGG16(weights='imagenet', include_top=False,
                       input_shape=X_train.shape[1:], classes=1)
    conv_out = base_model.get_layer('block5_pool').output

    pooled = GlobalMaxPooling2D()(conv_out)
    merged = concatenate([pooled, angle_feat])
    # Two identical 512-unit relu + dropout stages named fc2/fc3.
    for layer_name in ('fc2', 'fc3'):
        merged = Dense(512, activation='relu', name=layer_name)(merged)
        merged = Dropout(0.3)(merged)

    predictions = Dense(1, activation='sigmoid')(merged)

    model = Model(inputs=[base_model.input, angle_input], outputs=predictions)
    return model
Example 4
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 7 votes vote down vote up
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
    '''Set up the 3 conv layer keras model (DeepMind Atari architecture).

    nbClasses: number of softmax outputs (used only when includeTop).
    inputShape: input shape without the batch dimension.
    includeTop: if True, append the flatten/dense/softmax head; otherwise
        return the convolutional trunk only.
    '''
    inp = Input(shape=inputShape)
    x = inp
    # Keras 1 API: Conv2D(nb_filter, nb_row, nb_col); `subsample` is the
    # stride, and (1, 1) is its documented default for conv3.
    for nb_filter, size, stride, layer_name in (
            (32, 8, (4, 4), 'conv1'),
            (64, 4, (2, 2), 'conv2'),
            (64, 3, (1, 1), 'conv3')):
        x = Conv2D(nb_filter, size, size, subsample=stride,
                   activation='relu', border_mode='same', name=layer_name)(x)
    if includeTop:
        flat = Flatten(name='flatten')(x)
        hidden = Dense(512, activation='relu', name='dense1')(flat)
        out = Dense(nbClasses, activation='softmax', name='output')(hidden)
    else:
        out = x
    return Model(inp, out)
Example 5
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 7 votes vote down vote up
def weather_l2(hidden_nums=100, l2=0.01):
    """Two-hidden-layer dense net over 37 features with two output heads.

    The second hidden layer carries an L2 kernel penalty of strength `l2`.
    The sigmoid 'ae_part' head and the linear 'pred_part' head are
    concatenated into one 74-wide output, and the model is compiled with
    Adam on the module-level `mve_loss`.
    """
    input_img = Input(shape=(37,))
    hidden = Dense(hidden_nums, activation='relu')(input_img)
    hidden = Dense(hidden_nums, activation='relu',
                   kernel_regularizer=regularizers.l2(l2))(hidden)
    ae_head = Dense(37, activation='sigmoid',
                    name='ae_part')(hidden)
    pred_head = Dense(37, activation='linear',
                      name='pred_part')(hidden)
    combined = concatenate([ae_head, pred_head], axis=1, name='concatenate')

    mve_model = Model(input_img, outputs=[combined])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])

    return mve_model
Example 6
Project: Anamoly-Detection   Author: msmsk05   File: gaal_base.py    BSD 2-Clause "Simplified" License 7 votes vote down vote up
def create_generator(latent_size):  # pragma: no cover
    """Create the generator of the GAN for a given latent size.

    Parameters
    ----------
    latent_size : int
        The size of the latent space of the generator

    Returns
    -------
    D : Keras model() object
        Returns a model() object.
    """
    # Both layers start as the identity map; initializers are stateless,
    # so one instance can safely be shared.
    identity_init = keras.initializers.Identity(gain=1.0)

    gen = Sequential()
    gen.add(Dense(latent_size, input_dim=latent_size, activation='relu',
                  kernel_initializer=identity_init))
    gen.add(Dense(latent_size, activation='relu',
                  kernel_initializer=identity_init))

    latent = Input(shape=(latent_size,))
    return Model(latent, gen(latent))
Example 7
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 6 votes vote down vote up
def test_keras_export(self):
        """Exporting an Input->InnerProduct net through data()/dense()
        should yield a Keras Dense layer, with and without explicit
        glorot_normal fillers."""
        # Load the reference network description shipped with the tests.
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        # Keep only the input layer (l0) and the InnerProduct layer (l1),
        # and wire l0's output to l1.
        net = {'l0': net['Input2'], 'l1': net['InnerProduct']}
        net['l0']['connection']['output'].append('l1')
        # Test 1: default fillers export as a Dense layer.
        inp = data(net['l0'], '', 'l0')['l0']
        temp = dense(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Dense')
        # Test 2: explicit glorot_normal weight/bias fillers still export
        # as a Dense layer.
        net['l1']['params']['weight_filler'] = 'glorot_normal'
        net['l1']['params']['bias_filler'] = 'glorot_normal'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = dense(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Dense') 
Example 8
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 6 votes vote down vote up
def buildmodel(opt):
    """Build the 80x80x1 conv net for Pong and compile it with the given
    optimizer on binary cross-entropy (single sigmoid-free linear output)."""
    print("Now we build the model")
    model = Sequential()
    # Keras 1 API: Convolution2D(nb_filter, nb_row, nb_col); subsample = stride.
    model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',
                            input_shape=(80, 80, 1)))
    model.add(Activation('relu'))
    for nb_filter, size, stride in ((64, 4, (2, 2)), (64, 3, (1, 1))):
        model.add(Convolution2D(nb_filter, size, size, subsample=stride,
                                border_mode='same'))
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(1))

    model.compile(loss='binary_crossentropy', optimizer=opt)
    print("We finish building the model")
    return model
Example 9
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 6 votes vote down vote up
def get_dense_model():
    """Make keras model.

    Builds a single-hidden-layer sigmoid classifier over flattened 80x80
    frames, compiled with RMSprop on binary cross-entropy, and attempts to
    warm-start from 'mod_weights_binary.h5' when that file is available.
    """
    learning_rate = 1e-4
    inp = Input(shape=(80 * 80,))
    h = Dense(200, activation='relu')(inp)
    out = Dense(1, activation='sigmoid')(h)
    model = Model(inp, out)
    optim = RMSprop(learning_rate)
    model.compile(optim, 'binary_crossentropy')
    try:
        model.load_weights('mod_weights_binary.h5')
        print('weights loaded')
    except (IOError, OSError):
        # Weights file missing or unreadable: keep the fresh initialization.
        # (Bug fix: was a bare `except:`, which also silently swallowed
        # KeyboardInterrupt/SystemExit and any programming error.)
        pass
    return model
Example 10
Project: Image-Caption-Generator   Author: dabasajay   File: model.py    MIT License 6 votes vote down vote up
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    """Image-captioning model: a CNN-feature branch and an LSTM caption
    branch are merged and classified over the vocabulary with softmax.

    model_type selects the expected image-feature width ('inceptionv3'
    gives 2048-d features, 'vgg16' gives 4096-d). Compiled with Adam on
    categorical cross-entropy.
    """
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 produces a 2048-dimensional feature vector per image.
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 produces a 4096-dimensional feature vector per image.
        image_input = Input(shape=(4096,))

    image_branch = Dense(embedding_size, activation='relu')(
        Dropout(rnnConfig['dropout'])(image_input))

    caption_input = Input(shape=(max_len,))
    # mask_zero makes the LSTM skip zero-padded timesteps (an efficiency win).
    embedded = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_branch = LSTM(rnnConfig['LSTM_units'])(
        Dropout(rnnConfig['dropout'])(embedded))

    # Merge both branches and classify the next word.
    merged = concatenate([image_branch, caption_branch])
    hidden = Dense(rnnConfig['dense_units'], activation='relu')(merged)
    word_probs = Dense(vocab_size, activation='softmax')(hidden)

    model = Model(inputs=[image_input, caption_input], outputs=word_probs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Example 11
Project: keras-anomaly-detection   Author: chen0040   File: feedforward.py    MIT License 6 votes vote down vote up
def create_model(self, input_dim):
    """Build a dense autoencoder input_dim -> 14 -> 7 -> 7 -> input_dim,
    with an L1 activity penalty on the first encoder layer. Compiled with
    Adam on MSE and an accuracy metric."""
    encoding_dim = 14
    input_layer = Input(shape=(input_dim,))

    # Encoder: tanh bottleneck entry with sparsity penalty, then relu.
    x = Dense(encoding_dim, activation="tanh",
              activity_regularizer=regularizers.l1(10e-5))(input_layer)
    x = Dense(encoding_dim // 2, activation="relu")(x)

    # Decoder mirrors the encoder back to the input dimensionality.
    x = Dense(encoding_dim // 2, activation='tanh')(x)
    x = Dense(input_dim, activation='relu')(x)

    autoencoder = Model(inputs=input_layer, outputs=x)
    autoencoder.compile(optimizer='adam',
                        loss='mean_squared_error',
                        metrics=['accuracy'])

    return autoencoder
Example 12
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def squeeze_excite_block(input):
    ''' Create a squeeze-excite block
    Args:
        input: input tensor
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    # Infer the channel count from the static Keras shape; relies on the
    # private _keras_shape attribute and channels-last (TF) layout.
    filters = input._keras_shape[-1] # channel_axis = -1 for TF

    # Squeeze: global average over the temporal axis, kept broadcastable.
    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    # Excite: bottleneck (filters // 16) then per-channel sigmoid gates.
    se = Dense(filters // 16,  activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    # Rescale the input channels by the learned gates.
    se = multiply([input, se])
    return se 
Example 13
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 6 votes vote down vote up
def CausalCNN(n_filters, lr, decay, loss, 
               seq_len, input_features, 
               strides_len, kernel_size,
               dilation_rates):
    """Stack of dilated causal Conv1D blocks (conv -> batchnorm -> relu),
    one per entry of `dilation_rates`, ending in a 3-unit sigmoid head.

    NOTE(review): lr, decay, loss and strides_len are accepted but never
    used, and the returned model is NOT compiled — the caller must call
    compile() itself. Confirm whether these parameters were meant to be
    applied here.
    """

    inputs = Input(shape=(seq_len, input_features), name='input_layer')   
    x=inputs
    for dilation_rate in dilation_rates:
        # 'causal' padding keeps outputs independent of future timesteps.
        x = Conv1D(filters=n_filters,
               kernel_size=kernel_size, 
               padding='causal',
               dilation_rate=dilation_rate,
               activation='linear')(x) 
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    #x = Dense(7, activation='relu', name='dense_layer')(x)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(x)
    causalcnn = Model(inputs, outputs=[outputs])

    return causalcnn 
Example 14
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 6 votes vote down vote up
def weather_ae(layers, lr, decay, loss, 
               input_len, input_features):
    """Stacked dense network: one relu layer per entry of `layers`, applied
    to the raw (input_len, input_features) input, ending in a 3-unit
    sigmoid output layer.

    NOTE(review): lr, decay and loss are accepted but never used, and the
    returned model is NOT compiled. If `layers` is empty, `hn` is never
    bound and the output line raises NameError — confirm callers always
    pass at least one layer size.
    """
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    
    for i, hidden_nums in enumerate(layers):
        if i==0:
            # First hidden layer consumes the raw inputs.
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)

    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Example 15
Project: ANN   Author: waynezv   File: ANN_v0.1.py    MIT License 6 votes vote down vote up
def train(in_dim, out_dim, X_train, Y_train, X_test, Y_test):
    """Build and train a two-hidden-layer (100000-unit) softmax classifier.

    Trains with SGD on categorical cross-entropy for 5 epochs (Keras 1
    API: nb_epoch/init), prints the fit history, then evaluates and
    predicts on the test set.

    Returns the trained model.
    (Bug fix: the function previously computed the evaluation metrics and
    predictions, discarded them, and implicitly returned None.)
    """
    model = Sequential()
    model.add(Dense(100000, input_dim=in_dim, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(100000, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(out_dim, init='uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])

    hist = model.fit(X_train, Y_train, nb_epoch=5, batch_size=32,
                     validation_split=0.1, shuffle=True)
    print(hist.history)

    # Side-effect evaluation/prediction passes, kept for parity with the
    # original; their results are not returned.
    loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
    classes = model.predict_classes(X_test, batch_size=32)
    proba = model.predict_proba(X_test, batch_size=32)

    return model
Example 16
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def embedding_RNN_1_lstm_1_dense(input_shape):
    """One bidirectional LSTM layer followed by one dense layer.

    Uses the CuDNN-accelerated cell unless the first local device is a
    CPU. Returns (output_tensor, input_tensor).
    """
    device = device_lib.list_local_devices()[0].device_type
    cell = LSTM if device == 'CPU' else CuDNNLSTM

    inp = Input(batch_shape=input_shape)
    x = Bidirectional(cell(units=32, return_sequences=False))(inp)

    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.5)(x)

    return x, inp
Example 17
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def embedding_RNN_2_lstm_1_dense(input_shape):
    """Two stacked bidirectional LSTM layers followed by one dense layer.

    Uses the CuDNN-accelerated cell unless the first local device is a
    CPU. Returns (output_tensor, input_tensor).
    """
    device = device_lib.list_local_devices()[0].device_type
    cell = LSTM if device == 'CPU' else CuDNNLSTM

    inp = Input(batch_shape=input_shape)
    x = inp
    # Every recurrent layer except the last must emit full sequences.
    for return_seqs in (True, False):
        x = Bidirectional(cell(units=32, return_sequences=return_seqs))(x)

    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.5)(x)

    return x, inp
Example 18
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def embedding_RNN_2_lstm_2_dense(input_shape):
    """Two stacked bidirectional LSTM layers followed by two dense layers.

    Uses the CuDNN-accelerated cell unless the first local device is a
    CPU. Returns (output_tensor, input_tensor).
    """
    device = device_lib.list_local_devices()[0].device_type
    cell = LSTM if device == 'CPU' else CuDNNLSTM

    inp = Input(batch_shape=input_shape)
    x = inp
    # Every recurrent layer except the last must emit full sequences.
    for return_seqs in (True, False):
        x = Bidirectional(cell(units=32, return_sequences=return_seqs))(x)

    # Two identical dense + dropout stages.
    for _ in range(2):
        x = Dense(units=64, activation='relu')(x)
        x = Dropout(rate=0.5)(x)

    return x, inp
Example 19
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def embedding_RNN_3_lstm_1_dense(input_shape):
    """Three stacked bidirectional LSTM layers followed by one dense layer.

    Uses the CuDNN-accelerated cell unless the first local device is a
    CPU. Returns (output_tensor, input_tensor).
    """
    device = device_lib.list_local_devices()[0].device_type
    cell = LSTM if device == 'CPU' else CuDNNLSTM

    inp = Input(batch_shape=input_shape)
    x = inp
    # Every recurrent layer except the last must emit full sequences.
    for return_seqs in (True, True, False):
        x = Bidirectional(cell(units=32, return_sequences=return_seqs))(x)

    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.5)(x)

    return x, inp
Example 20
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def embedding_RNN_3_lstm_2_dense(input_shape):
    """Three stacked bidirectional LSTM layers followed by two dense layers.

    Uses the CuDNN-accelerated cell unless the first local device is a
    CPU. Returns (output_tensor, input_tensor).
    """
    device = device_lib.list_local_devices()[0].device_type
    cell = LSTM if device == 'CPU' else CuDNNLSTM

    inp = Input(batch_shape=input_shape)
    x = inp
    # Every recurrent layer except the last must emit full sequences.
    for return_seqs in (True, True, False):
        x = Bidirectional(cell(units=32, return_sequences=return_seqs))(x)

    # Two identical dense + dropout stages.
    for _ in range(2):
        x = Dense(units=64, activation='relu')(x)
        x = Dropout(rate=0.5)(x)

    return x, inp
Example 21
Project: mtrl-auto-uav   Author: brunapearson   File: mtrl_network.py    MIT License 5 votes vote down vote up
def create_model():
    """Two parallel conv towers over one 224x224x3 image: the first (with
    dropout) feeds the four quaternion outputs, the second feeds the three
    position outputs. Returns an uncompiled 7-output Model."""
    input_img = Input(shape=(224, 224, 3))

    def conv_tower(with_dropout):
        # Three conv/pool stages, then Dense 500 -> 100 -> 20; dropout
        # after the 500-unit layer only when requested.
        t = input_img
        for n_filters in (16, 32, 64):
            t = Conv2D(n_filters, kernel_size=3, activation='relu')(t)
            t = MaxPooling2D(pool_size=(2, 2))(t)
        t = Flatten()(t)
        t = Dense(500, activation='relu')(t)
        if with_dropout:
            t = Dropout(0.20)(t)
        t = Dense(100, activation='relu')(t)
        return Dense(20, activation='relu')(t)

    x = conv_tower(True)   # quaternion (orientation) tower
    n = conv_tower(False)  # position tower

    # Position heads read from n; quaternion heads read from x.
    outputs = [
        Dense(1, activation='linear', name='input_x')(n),
        Dense(1, activation='linear', name='input_y')(n),
        Dense(1, activation='linear', name='input_z')(n),
        Dense(1, activation='linear', name='input_qw')(x),
        Dense(1, activation='linear', name='input_qx')(x),
        Dense(1, activation='linear', name='input_qy')(x),
        Dense(1, activation='linear', name='input_qz')(x),
    ]

    return Model(inputs=input_img, outputs=outputs)
Example 22
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def create_binary_classification_model():
    """Small dense binary classifier over 30 features, compiled with Adam
    on binary cross-entropy."""
    from keras.models import Sequential
    from keras.layers import Dense, Dropout

    net = Sequential([
        Dense(64, input_shape=(30,), activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer='adam')
    return net
Example 23
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def create_categorical_classification_model():
    """Small dense 3-class classifier over 30 features, compiled with Adam
    on categorical cross-entropy (output uses sigmoid, as in the original)."""
    from keras.models import Sequential
    from keras.layers import Dense, Dropout

    net = Sequential([
        Dense(64, input_shape=(30,), activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(3, activation='sigmoid'),
    ])
    net.compile(loss='categorical_crossentropy', optimizer='adam')
    return net
Example 24
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0 5 votes vote down vote up
def dense(layer, layer_in, layerId, tensor=True):
    """Export a Caffe-style InnerProduct layer spec as a Keras Dense layer.

    layer: dict describing the layer ('shape', 'params' with num_output,
        fillers, optional regularizers/constraints, use_bias).
    layer_in: single-element list holding the input tensor.
    layerId: key under which the Dense layer (and any helper Flatten) is
        stored in the returned dict.
    tensor: when True, apply the layer to layer_in and store the resulting
        tensor; when False, store the unapplied layer object.
    """
    out = {}
    # Dense expects rank-2 input: flatten anything higher-dimensional first
    # and record the helper layer under its own key.
    if (len(layer['shape']['input']) > 1):
        out[layerId + 'Flatten'] = Flatten()(*layer_in)
        layer_in = [out[layerId + 'Flatten']]
    units = layer['params']['num_output']
    # Map Caffe filler names to Keras initializers where known; otherwise
    # pass the name through to Keras unchanged.
    if (layer['params']['weight_filler'] in fillerMap):
        kernel_initializer = fillerMap[layer['params']['weight_filler']]
    else:
        kernel_initializer = layer['params']['weight_filler']
    if (layer['params']['bias_filler'] in fillerMap):
        bias_initializer = fillerMap[layer['params']['bias_filler']]
    else:
        bias_initializer = layer['params']['bias_filler']
    # safety checks to avoid runtime errors: regularizers/constraints are
    # optional in the spec and default to None.
    kernel_regularizer = None
    bias_regularizer = None
    activity_regularizer = None
    kernel_constraint = None
    bias_constraint = None
    if 'kernel_regularizer' in layer['params']:
        kernel_regularizer = regularizerMap[layer['params']['kernel_regularizer']]
    if 'bias_regularizer' in layer['params']:
        bias_regularizer = regularizerMap[layer['params']['bias_regularizer']]
    if 'activity_regularizer' in layer['params']:
        activity_regularizer = regularizerMap[layer['params']
                                              ['activity_regularizer']]
    if 'kernel_constraint' in layer['params']:
        kernel_constraint = constraintMap[layer['params']['kernel_constraint']]
    if 'bias_constraint' in layer['params']:
        bias_constraint = constraintMap[layer['params']['bias_constraint']]
    use_bias = layer['params']['use_bias']
    out[layerId] = Dense(units=units, kernel_initializer=kernel_initializer,
                         kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer, bias_constraint=bias_constraint,
                         kernel_constraint=kernel_constraint, use_bias=use_bias,
                         bias_initializer=bias_initializer)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
Example 25
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A Dense layer carrying l2 regularizers and max_norm constraints
        should round-trip through the importer (1 layer, 3 params)."""
        model = Sequential()
        model.add(Dense(100, kernel_regularizer=regularizers.l2(0.01), bias_regularizer=regularizers.l2(0.01),
                        activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                        bias_constraint='max_norm', activation='relu', input_shape=(16,)))
        model.build()
        self.keras_param_test(model, 1, 3) 
Example 26
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 5 votes vote down vote up
def CapsNet(input_shape, n_class, num_routing):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param num_routing: number of routing iterations
    :return: Two Keras Models, the first one used for training, and the second one for evaluation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)

    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')

    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, num_routing=num_routing,
                             name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder network.
    y = layers.Input(shape=(n_class,))
    masked_by_y = Mask()([digitcaps, y])  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    # (reconstructs the flattened input image from the masked capsule vector)
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    # Training takes (image, label); evaluation takes only the image.
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])
    return train_model, eval_model 
Example 27
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: VGG16_BottleNeck.py    MIT License 5 votes vote down vote up
def getVGGModel():
    """Build a VGG16-based binary classifier with an auxiliary scalar
    'angle' input, compiled with Adam on binary cross-entropy.

    NOTE(review): relies on a module-level X_train for the image input
    shape — confirm it is defined before this function is called.
    """
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = VGG16(weights='imagenet', include_top=False, 
                 input_shape=X_train.shape[1:], classes=1)
    x = base_model.get_layer('block5_pool').output
    

    x = GlobalMaxPooling2D()(x)
    # Concatenate pooled conv features with the angle feature.
    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(512, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    merge_one = Dense(512, activation='relu', name='fc3')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    
    predictions = Dense(1, activation='sigmoid')(merge_one)
    
    # NOTE(review): `input=`/`output=` are the legacy Keras 1 keyword names
    # (Keras 2 uses `inputs=`/`outputs=`) — confirm the installed Keras
    # version still accepts them.
    model = Model(input=[base_model.input, input_2], output=predictions)
    
    adam = Adam(lr=1e-3, epsilon = 1e-8, beta_1 = .9, beta_2 = .999)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model

################

#Using K-fold Cross Validation with Data Augmentation. 
Example 28
Project: MODS_ConvNet   Author: santiagolopezg   File: hipster_net.py    MIT License 5 votes vote down vote up
def cifar():
    """VGG-style classifier for 1x256x192 channels-first inputs with a
    2-class softmax head. Returns an uncompiled Model.
    (Keras 1 API: Convolution2D(nb_filter, nb_row, nb_col), border_mode.)"""
    # Determine proper input shape (Theano-style channel ordering).
    K.set_image_dim_ordering('th')
    img_input = Input(shape=(1, 256, 192))

    x = img_input
    # (filters, number of convs, conv name prefix, pool name) per block.
    blocks = ((64, 2, 'conv1', 'pool1'),
              (128, 3, 'conv2', 'pool2'),
              (256, 3, 'conv3', 'pool3'),
              (512, 3, 'conv4', 'pool4'))
    for nb_filter, n_convs, conv_prefix, pool_name in blocks:
        for i in range(1, n_convs + 1):
            x = Convolution2D(nb_filter, 3, 3, activation='relu',
                              border_mode='same',
                              name='%s_%d' % (conv_prefix, i))(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name=pool_name)(x)

    x = Flatten(name='flatten')(x)
    x = Dense(1000, activation='relu', name='fc1')(x)
    x = Dense(1000, activation='relu', name='fc2')(x)
    x = Dense(2, activation='softmax', name='pred')(x)

    # Create model.
    model = Model(img_input, x)

    #weights='MODS_keras_weights_3_he_normal_0.5_rmsprop_24.h5'
    #model.load_weights(weights)

    return model
Example 29
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a TF session and a tiny two-layer softmax Sequential
        model for the test case."""
        from keras.models import Sequential
        from keras.layers import Dense, Activation
        import tensorflow as tf

        def dummy_model():
            # 100-feature input -> Dense(20) -> Dense(10) -> softmax.
            input_shape = (100,)
            return Sequential([Dense(20, name='l1',
                                     input_shape=input_shape),
                               Dense(10, name='l2'),
                               Activation('softmax', name='softmax')])

        self.sess = tf.Session()
        # NOTE(review): as_default() returns a context manager; calling it
        # without `with` has no lasting effect — confirm intent.
        self.sess.as_default()
        self.model = dummy_model() 
Example 30
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes vote down vote up
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
        '''Set up the 3 conv layer keras model.
        classes: Number of outputs.
        inputShape: The input shape without the batch size.
        includeTop: If you only want the whole net, or just the convolutions.
        '''
        inp = Input(shape=inputShape)
        # Keras 1 API: Conv2D(nb_filter, nb_row, nb_col); `subsample` is
        # the stride. Three conv stages: 32@8x8/4, 64@4x4/2, 64@3x3/1.
        x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu', border_mode='same', name='conv1')(inp)
        x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu', border_mode='same', name='conv2')(x)
        x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
        if includeTop:
            # Classification head: flatten -> 512 relu -> softmax.
            x = Flatten(name='flatten')(x)
            x = Dense(512, activation='relu', name='dense1')(x)
            out = Dense(nbClasses, activation='softmax', name='output')(x)
        else:
            # Return the convolutional trunk only.
            out = x
        model = Model(inp, out)
        return model 
Example 31
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function.
        self.model is the model with both outputs
        '''
        inputShape = (self.D, self.D, self.nbImgInState)
        # Reuse the DeepMind conv trunk (no classification head).
        model = self.deepMindAtariNet(self.nbActionClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        # Shared dense trunk feeding both the policy and value heads.
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbActionClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # Should we compile model?

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)
        # Should we compile model?

        # Combined A3C model: policy loss (with entropy bonus) + MSE value
        # loss, weighted by self.mseBeta.
        self.model = Model(inp, [action, value])
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        self.model.compile(optim, loss) # Need to make it possible to set other optimizers

        # Optionally resume from saved weights (after compiling).
        if self.resume:
            self.model.load_weights(self.modelFileName)
            return 
Example 32
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes vote down vote up
def setupModel(self):
        """Create (or reload from disk) the one-hidden-layer policy network."""
        if self.resume:
            # Continue from a previously saved model file.
            self.model = load_model(self.modelFileName)
            return
        observation = Input(shape=(self.D,))
        hidden = Dense(self.H, activation='relu')(observation)
        prob_up = Dense(1, activation='sigmoid')(hidden)
        self.model = Model(observation, prob_up)
        self.model.compile(RMSprop(self.learning_rate, self.decay_rate),
                           'binary_crossentropy')

    # @staticmethod
    # def _preprocess_image(I):
        # '''Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector'''
        # I = I[35:195] # crop
        # I = I[::2,::2,0] # downsample by factor of 2
        # I[I == 144] = 0 # erase background (background type 1)
        # I[I == 109] = 0 # erase background (background type 2)
        # I[I != 0] = 1 # everything else (paddles, ball) just set to 1
        # return I.astype(np.float).ravel()

    # def preprocess(self, observation):
        # '''Proprocess observation. And store in states list'''
        # cur_x = self._preprocess_image(observation)
        # x = cur_x - self.prev_x if self.prev_x is not None else np.zeros(self.D)
        # self.prev_x = cur_x
        # x = x.reshape((1, -1))
        # self.states.append(x) 
Example 33
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Build the three Keras models used by the agent.

        self.actionModel -- policy head only (softmax over actions).
        self.valueModel  -- value-function head only (scalar V).
        self.model       -- shared trunk with both heads; this is the one
                            that gets compiled and trained.
        '''
        if self.resume:
            self.model = load_model(self.modelFileName)
            # NOTE(review): actionModel/valueModel are not rebuilt after a
            # resume, so they remain unavailable here.
            return
        inputShape = (self.D, self.D, self.nbImgInState)
        # Convolutional trunk without its classification top.
        model = self.deepMindAtariNet(self.nbClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)  # used only for prediction; never compiled

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)  # used only for prediction; never compiled

        self.model = Model(inp, [action, value])
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        # Bug fix: loss_weights was built but never handed to compile(), so the
        # value loss was not scaled by mseBeta. Need to make it possible to set
        # other optimizers.
        self.model.compile(optim, loss, loss_weights=loss_weights)
Example 34
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_a2c.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Build the three Keras models used by the A2C agent.

        self.actionModel -- policy head only (softmax over actions).
        self.valueModel  -- prediction of the value function V (scalar).
        self.model       -- shared conv trunk with both heads; this is the
                            one that gets compiled and trained.
        '''
        inputShape = (self.D, self.D, self.nbImgInState)

        inp = Input(shape=inputShape)
        # DeepMind-Atari-style conv trunk (Keras 1.x layer signatures).
        x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu', border_mode='same', name='conv1')(inp)
        x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu', border_mode='same', name='conv2')(x)
        x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbActionClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)  # used only for prediction; never compiled

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)  # used only for prediction; never compiled

        self.model = Model(inp, [action, value])

        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}

        optim = RMSprop(self.learningRate, self.decayRate)
        # Bug fix: loss_weights was built but never handed to compile(), so the
        # value loss was not scaled by mseBeta.
        self.model.compile(optim, loss, loss_weights=loss_weights)

        if self.resume:
            self.model.load_weights(self.modelFileName)
            return
Example 35
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 5 votes vote down vote up
def create_perc_model(input_dim, hidden_dim):
    """Flatten an 80x80x1 frame through one ReLU hidden layer to a sigmoid prob.

    Returns the (input_tensor, output_tensor) pair for the caller to wrap in
    a Model. NOTE(review): input_dim is ignored; the input shape is
    hard-coded to 80x80x1 -- confirm against callers.
    """
    frame = Input(shape=(80, 80, 1), dtype='float32', name='main_input')
    flat = Flatten()(frame)
    hidden = Dense(hidden_dim, activation='relu')(flat)
    prob = Dense(1, activation='sigmoid')(hidden)
    return frame, prob
Example 36
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 5 votes vote down vote up
def create_conv_model(input_dim):
    """Two same-padded 4x4 conv layers over an 80x80x1 frame -> sigmoid prob.

    Returns the (input_tensor, output_tensor) pair for the caller to wrap in
    a Model. NOTE(review): input_dim is ignored; the input shape is
    hard-coded to 80x80x1 -- confirm against callers.
    """
    frame = Input(shape=(80, 80, 1), dtype='float32', name='main_input')
    features = Convolution2D(32, 4, 4, border_mode='same', activation='relu')(frame)
    features = Convolution2D(32, 4, 4, border_mode='same', activation='relu')(features)
    features = Flatten()(features)
    prob = Dense(1, activation='sigmoid')(features)
    return frame, prob
Example 37
Project: oslodatascience-rl   Author: Froskekongen   File: pogn_parallel.py    MIT License 5 votes vote down vote up
def setupModel(self):
            """Build and compile the one-hidden-layer policy network."""
            observation = Input(shape=(self.input_dimensions,))
            hidden = Dense(self.num_hidden_layer_neurons, activation='relu')(observation)
            prob = Dense(1, activation='sigmoid')(hidden)
            self.model = Model(observation, prob)
            self.model.compile(RMSprop(self.learning_rate, self.decay_rate),
                               'binary_crossentropy')
Example 38
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_pongDense.py    MIT License 5 votes vote down vote up
def get_dense_model():
    """Build (or reload from disk) the one-hidden-layer Keras policy model."""
    if resume:
        # Continue from a previously saved model file.
        return load_model(model_file_name)
    observation = Input(shape=(D,))
    hidden = Dense(H, activation='relu')(observation)
    prob = Dense(1, activation='sigmoid')(hidden)
    net = Model(observation, prob)
    net.compile(RMSprop(learning_rate, decay_rate), 'binary_crossentropy')
    return net
Example 39
Project: Image-Caption-Generator   Author: dabasajay   File: model.py    MIT License 5 votes vote down vote up
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
	"""Image-captioning model: CNN features + word embeddings -> BiLSTM -> softmax.

	vocab_size: number of words in the vocabulary.
	max_len:    caption length after zero padding.
	rnnConfig:  dict with 'embedding_size' and 'LSTM_units'.
	model_type: 'inceptionv3' (2048-d image features) or 'vgg16' (4096-d).
	"""
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 encodes each image as a 2048-d vector.
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 encodes each image as a 4096-d vector.
		image_input = Input(shape=(4096,))
	# Project the image vector into the embedding space and repeat it once
	# per time step so it can be concatenated with the word sequence.
	projected = Dense(embedding_size, activation='relu')(image_input)
	image_seq = RepeatVector(max_len)(projected)

	caption_input = Input(shape=(max_len,))
	# mask_zero ignores the zero padding added to short captions.
	embedded_words = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	# return_sequences=True: we need the hidden state at every position.
	word_states = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(embedded_words)
	caption_seq = TimeDistributed(Dense(embedding_size))(word_states)

	# Fuse both sequences, summarise with a BiLSTM, classify the next word.
	merged = concatenate([image_seq, caption_seq])
	sentence = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(merged)
	next_word = Dense(vocab_size, activation='softmax')(sentence)

	model = Model(inputs=[image_input, caption_input], outputs=next_word)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model
Example 40
Project: deep-learning-note   Author: wdxtub   File: 3_price_prediction.py    MIT License 5 votes vote down vote up
def build_model():
    """Two 64-unit ReLU hidden layers with a scalar linear output (regression).

    Reads the module-level `train_data` to size the input layer; compiled
    with RMSprop/MSE and MAE as the reported metric.
    """
    net = models.Sequential([
        layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)),
        layers.Dense(64, activation='relu'),
        layers.Dense(1),
    ])
    net.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return net
Example 41
Project: keras-anomaly-detection   Author: chen0040   File: convolutional.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Conv1D + global-max-pool regressor that maps a window onto itself."""
        net = Sequential([
            Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                   input_shape=(time_window_size, 1)),
            GlobalMaxPool1D(),
            Dense(units=time_window_size, activation='linear'),
        ])
        net.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(net.summary())
        return net
Example 42
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Single-LSTM regressor that maps a window onto itself."""
        net = Sequential()
        net.add(LSTM(units=128, input_shape=(time_window_size, 1),
                     return_sequences=False))
        net.add(Dense(units=time_window_size, activation='linear'))
        net.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(net.summary())
        return net
Example 43
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Bidirectional-LSTM regressor that maps a window onto itself."""
        net = Sequential()
        net.add(Bidirectional(LSTM(units=64, dropout=0.2, recurrent_dropout=0.2),
                              input_shape=(time_window_size, 1)))
        net.add(Dense(units=time_window_size, activation='linear'))
        net.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(net.summary())
        return net
Example 44
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def generate_model_3():
    """LSTM branch + three-stage Conv1D branch, concatenated into a softmax head."""
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # Recurrent branch: mask padding, summarise with a small LSTM.
    rnn_branch = Masking()(ip)
    rnn_branch = LSTM(8)(rnn_branch)
    rnn_branch = Dropout(0.8)(rnn_branch)

    # Convolutional branch operates on (timesteps, variables).
    conv_branch = Permute((2, 1))(ip)
    for n_filters, width in ((128, 8), (256, 5), (128, 3)):
        conv_branch = Conv1D(n_filters, width, padding='same',
                             kernel_initializer='he_uniform')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch)

    merged = concatenate([rnn_branch, conv_branch])
    out = Dense(NB_CLASS, activation='softmax')(merged)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example 45
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def generate_model_4():
    """Attention-LSTM branch + three-stage Conv1D branch, merged into a softmax head."""
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # Recurrent branch: mask padding, summarise with an attention LSTM.
    attn_branch = Masking()(ip)
    attn_branch = AttentionLSTM(8)(attn_branch)
    attn_branch = Dropout(0.8)(attn_branch)

    # Convolutional branch operates on (timesteps, variables).
    conv_branch = Permute((2, 1))(ip)
    for n_filters, width in ((128, 8), (256, 5), (128, 3)):
        conv_branch = Conv1D(n_filters, width, padding='same',
                             kernel_initializer='he_uniform')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch)

    merged = concatenate([attn_branch, conv_branch])
    out = Dense(NB_CLASS, activation='softmax')(merged)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example 46
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 5 votes vote down vote up
def weather_mve(hidden_nums=100):
    """Mean-variance-estimation net: shared trunk, two 37-d heads, concatenated.

    'ae_part' is a sigmoid reconstruction head; 'pred_part' a linear head.
    Both are concatenated so the custom mve_loss can consume them together.
    """
    input_img = Input(shape=(37,))
    trunk = Dense(hidden_nums, activation='relu')(input_img)
    trunk = Dense(hidden_nums, activation='relu')(trunk)
    out_u = Dense(37, activation='sigmoid', name='ae_part')(trunk)
    out_sig = Dense(37, activation='linear', name='pred_part')(trunk)
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')

    mve_model = Model(input_img, outputs=[out_both])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])
    return mve_model
Example 47
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 5 votes vote down vote up
def weather_mse():
    """Two 100-unit ReLU hidden layers: 37 inputs -> 37 sigmoid outputs, MSE loss."""
    input_img = Input(shape=(37,))
    hidden = Dense(100, activation='relu')(input_img)
    hidden = Dense(100, activation='relu')(hidden)
    out_pred = Dense(37, activation='sigmoid', name='pred_part')(hidden)
    net = Model(input_img, outputs=[out_pred])
    net.compile(optimizer='adam', loss='mse', loss_weights=[1.])
    return net
Example 48
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 5 votes vote down vote up
def weather_fnn(layers, lr,
            decay, loss, seq_len,
            input_features, output_features):
    """Conv1D front-end followed by a configurable dense stack.

    layers: iterable of hidden-layer widths for the dense stack.
    Output is reshaped back to (seq_len, output_features).
    NOTE(review): lr, decay and loss are accepted but unused here and the
    model is returned uncompiled -- confirm the caller compiles it.
    """
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')

    # Two valid-padded conv stages, each followed by BN + ReLU.
    conv_ = Conv1D(11, kernel_size=13, strides=1,
                        data_format='channels_last',
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1,
                        data_format='channels_last',
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    # Flatten the conv output and feed it through the dense stack:
    # linear Dense -> BN -> ReLU per configured width.
    hn = Reshape((-1,))(conv_)
    for i, hidden_nums in enumerate(layers):
        hn = Dense(hidden_nums, activation='linear')(hn)
        hn = BatchNormalization(name='BN_{}'.format(i))(hn)
        hn = Activation('relu')(hn)

    outputs = Dense(seq_len * output_features, activation='sigmoid',
                    name='output_layer')(hn)
    outputs = Reshape((seq_len, output_features))(outputs)

    return Model(ori_inputs, outputs=[outputs])
Example 49
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 5 votes vote down vote up
def weather_fusion():
    """Fusion net with an autoencoder head and a prediction head.

    Returns a compiled two-output model: 'ae_part' reconstructs the 37-d
    input, 'pred_part' predicts the target; the heads are weighted 1.5 / 1.0
    in the total MSE loss (matching the loss_weights below).
    """
    input_img = Input(shape=(37,))
    hn = Dense(100, activation='relu')(input_img)
    hn = Dense(100, activation='relu')(hn)
    # Bug fix: out_ae was commented out but still referenced in the Model
    # outputs below, raising NameError at call time. Restore the autoencoder
    # head (same pattern as weather_mve's 'ae_part').
    out_ae = Dense(37, activation='sigmoid', name='ae_part')(hn)
    out_pred = Dense(37, activation='sigmoid', name='pred_part')(hn)

    weather_model = Model(input_img, outputs=[out_ae, out_pred])
    weather_model.compile(optimizer='adam', loss='mse', loss_weights=[1.5, 1.])

    return weather_model
Example 50
Project: Jetson-RaceCar-AI   Author: ardamavi   File: get_model.py    Apache License 2.0 5 votes vote down vote up
def get_model():
    """Two-input net: 500x500x1 camera frames + 3-d lidar -> 2 sigmoid outputs."""
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    # Convolutional feature extractor for the image stream.
    feat = Conv2D(32, (4, 4), strides=(2, 2))(img_inputs)
    feat = Conv2D(32, (4, 4), strides=(2, 2))(feat)
    feat = Conv2D(32, (3, 3), strides=(1, 1))(feat)
    feat = Activation('relu')(feat)
    feat = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(feat)
    feat = Flatten()(feat)
    img_branch = Dense(32)(feat)

    # The lidar readings get their own small projection.
    lidar_branch = Dense(32)(lidar_inputs)

    fused = concatenate([img_branch, lidar_branch])
    fused = Dense(10)(fused)
    fused = Activation('relu')(fused)
    fused = Dropout(0.5)(fused)

    outputs = Dense(2)(fused)
    outputs = Activation('sigmoid')(outputs)

    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])
    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model
Example 51
Project: Kaggler   Author: jeongyoonlee   File: categorical.py    MIT License 5 votes vote down vote up
def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
        """Entity-embedding MLP over categorical + numerical columns.

        Each categorical column gets its own Input + Embedding; numerical
        columns share one Input. Returns (model, n_emb, n_uniq).
        Note: n_uniq and n_emb are filled in *in place* where they hold
        falsy entries, and the mutated lists are returned.
        """
        inputs = []
        embeddings = []
        for i, col in enumerate(cat_cols):

            # Fill in missing cardinalities / embedding sizes in place.
            if not n_uniq[i]:
                n_uniq[i] = X[col].nunique()
            if not n_emb[i]:
                n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

            cat_in = Input(shape=(1,), name=col)
            emb = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i],
                            name=col + EMBEDDING_SUFFIX)(cat_in)
            emb = Dropout(.2)(emb)
            emb = Reshape((n_emb[i],))(emb)

            inputs.append(cat_in)
            embeddings.append(emb)

        if num_cols:
            num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
            merged_input = Concatenate(axis=1)(embeddings + [num_inputs])
            inputs = inputs + [num_inputs]
        else:
            merged_input = Concatenate(axis=1)(embeddings)

        # Dense head: BN -> (Dense -> Dropout -> BN) x 2 -> output unit.
        x = BatchNormalization()(merged_input)
        for width in (128, 64):
            x = Dense(width, activation='relu')(x)
            x = Dropout(.5)(x)
            x = BatchNormalization()(x)
        output = Dense(1, activation=output_activation)(x)

        model = Model(inputs=inputs, outputs=output)

        return model, n_emb, n_uniq
Example 52
Project: dac   Author: KBNLresearch   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def create_model(self):
        '''Build and compile a one-hidden-layer binary classifier.'''
        # Weight the positive class more heavily during training.
        self.class_weight = {0: 0.25, 1: 0.75}

        n_features = self.data.shape[1]
        clf = Sequential()
        clf.add(Dense(n_features, input_dim=n_features,
                      activation='relu', kernel_constraint=maxnorm(3)))
        clf.add(Dropout(0.5))
        clf.add(Dense(1, activation='sigmoid'))
        clf.compile(optimizer='RMSprop', loss='binary_crossentropy',
                    metrics=['accuracy'])
        return clf
Example 53
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License 5 votes vote down vote up
def build(self, input_shape):
        """Create the per-head Q/K/V projection layers for multi-head attention.

        Builds self._h triples of TimeDistributed Dense projections (no bias),
        a shared scaled-dot-product attention layer, and a final projection
        back to d_model. Falls back to the last dimension of the second input
        when d_k / d_model were not given at construction time.
        """
        self._validate_input_shape(input_shape)

        d_k = self._d_k if self._d_k else input_shape[1][-1]
        d_model = self._d_model if self._d_model else input_shape[1][-1]
        d_v = self._d_v

        # Idiom fix: isinstance() instead of `type(...) == ...`. Unwraps a
        # TF1 Dimension object into a plain int for the Dense constructors.
        if isinstance(d_k, tf.Dimension):
            d_k = d_k.value
        if isinstance(d_model, tf.Dimension):
            d_model = d_model.value

        self._q_layers = []
        self._k_layers = []
        self._v_layers = []
        self._sdp_layer = ScaledDotProductAttention(return_attention=self._return_attention)

        # One (Q, K, V) projection triple per attention head.
        for _ in range(self._h):
            self._q_layers.append(
                TimeDistributed(
                    Dense(d_k, activation=self._activation, use_bias=False)
                )
            )
            self._k_layers.append(
                TimeDistributed(
                    Dense(d_k, activation=self._activation, use_bias=False)
                )
            )
            self._v_layers.append(
                TimeDistributed(
                    Dense(d_v, activation=self._activation, use_bias=False)
                )
            )

        # Final projection back to the model dimension.
        self._output = TimeDistributed(Dense(d_model))
Example 54
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def _base_for_model(mode, n=50, neurons=50):
    """Create synthetic data plus a one-hidden-layer Sequential base model.

    Returns (x, y, model); the caller adds its own output layer and compiles.
    """
    from keras.models import Sequential
    from keras.layers import Dense

    from .create_synth_data import create_synth_data

    x, y = create_synth_data(mode, n=n)

    base = Sequential()
    base.add(Dense(neurons, input_dim=x.shape[1], activation='relu'))

    return x, y, base
Example 55
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def create_synth_multi_class_model(n=50, neurons=50):
    """Fit a synthetic multi-class model: base net + 4-unit head, sparse CE."""
    from keras.layers import Dense

    x, y, model = _base_for_model('multi_class', n=n, neurons=neurons)
    model.add(Dense(4))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    model.fit(x, y)
    return model
Example 56
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def create_synth_regression_model(n=50, neurons=50):
    """Fit a synthetic regression model: base net + scalar head, SGD/MAPE."""
    from keras.layers import Dense

    x, y, model = _base_for_model('regression', n=n, neurons=neurons)
    model.add(Dense(1))
    model.compile(optimizer='sgd', loss='mean_absolute_percentage_error')
    model.fit(x, y)
    return model
Example 57
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def create_synth_multi_label_model(n=50, neurons=50):
    """Fit a synthetic multi-label model: base net + 4-unit softmax head."""
    from keras.layers import Dense

    x, y, model = _base_for_model('multi_label', n=n, neurons=neurons)
    model.add(Dense(4, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    model.fit(x, y)
    return model
Example 58
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def create_synth_binary_model(n=50, neurons=50):
    """Fit a synthetic binary model: base net + sigmoid head, binary CE."""
    from keras.layers import Dense

    x, y, model = _base_for_model('binary', n=n, neurons=neurons)
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy')
    model.fit(x, y)
    return model
Example 59
Project: phoneticSimilarity   Author: ronggong   File: models_RNN_regression.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def embedding_RNN(input_shape):
    """BiLSTM(16) encoder with a scalar linear head, compiled with adam/MSE."""
    seq_in = Input(shape=input_shape)
    encoded = Bidirectional(LSTM(units=16, return_sequences=False))(seq_in)
    score = Dense(1, activation='linear')(encoded)
    model = Model(inputs=seq_in, outputs=score)
    # NOTE(review): 'accuracy' is an unusual metric for a regression loss --
    # kept for behavioural parity.
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['accuracy'])
    return model
Example 60
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def embedding_2_lstm_1_dense_base(device, base_input):
    """Two stacked BiLSTMs + a 64-unit ReLU dense layer with dropout.

    Uses the CuDNN LSTM kernels whenever the first local device is not a CPU.
    """
    rnn = LSTM if device == 'CPU' else CuDNNLSTM
    x = Bidirectional(rnn(units=32, return_sequences=True))(base_input)
    x = Bidirectional(rnn(units=32, return_sequences=False))(x)
    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.5)(x)
    return x
Example 61
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def embedding_1_lstm_1_dense_base(device, base_input):
    """One BiLSTM + a 64-unit ReLU dense layer with dropout.

    Uses the CuDNN LSTM kernel whenever the first local device is not a CPU.
    """
    rnn = LSTM if device == 'CPU' else CuDNNLSTM
    x = Bidirectional(rnn(units=32, return_sequences=False))(base_input)
    x = Dense(units=64, activation='relu')(x)
    x = Dropout(rate=0.5)(x)
    return x
Example 62
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def embedding_base_model(input_shape, output_shape, base_model):
    """Wrap a base trunk with a linear embedding layer into a named Model.

    base_model is a callable (device, input_tensor) -> feature tensor.
    """
    # Detect whether the first local device is CPU or GPU.
    device = device_lib.list_local_devices()[0].device_type

    base_input = Input(batch_shape=input_shape)
    features = base_model(device, base_input)
    embedding = Dense(output_shape, activation='linear',
                      name='embedding_layer')(features)

    return Model(inputs=base_input, outputs=embedding, name='embedding')
Example 63
Project: phoneticSimilarity   Author: ronggong   File: cal_embeddigns_for_plotting.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def calculate_overall_quality_score(list_feature_teacher, list_feature_student):
    """
    Calculate overall quality similarity

    Computes a per-pair cosine-based similarity between embeddings of
    teacher and student features, using a classifier trained elsewhere:
    the saved model's weights are transplanted onto a freshly built
    batch-size-1 network so variable-length inputs can be embedded.

    :param list_feature_teacher: list of teacher feature matrices
        (presumably frames x 80 features, per input_shape below -- confirm)
    :param list_feature_student: list of student feature matrices, same
        length and layout as list_feature_teacher
    :return: list of similarity scores in [0, 1], one per feature pair
    """
    # Hard-coded local paths to the trained model and fitted scaler.
    path_model = '/home/gong/Documents/pycharmProjects/phoneticSimilarity/models/phone_embedding_classifier'
    path_dataset = '/media/gong/ec990efa-9ee0-4693-984b-29372dcea0d1/Data/RongGong/phoneEmbedding'
    config = [1, 0]
    input_shape = [1, None, 80]  # batch 1, variable time steps, 80 features
    embedding_dim = 32
    prefix = '_2_class_teacher_student'
    attention_dense_str = "dense_32_"
    model_name = config_select(config) + prefix
    filename_model = os.path.join(path_model, model_name + '_' + attention_dense_str + str(0) + '.h5')
    filename_scaler = os.path.join(path_dataset, 'scaler_phn_embedding_train_teacher_student.pkl')

    # Load the trained classifier only to harvest its weights.
    model = load_model(filepath=filename_model, custom_objects={'Attention': Attention(return_attention=True)})
    weights = model.get_weights()
    # Rebuild the same architecture with batch size 1 and copy the weights
    # across; assumes model_select reproduces the trained layer order --
    # TODO confirm.
    x, input = model_select(config=config, input_shape=input_shape, conv=False, dropout=False)
    outputs = Dense(embedding_dim)(x)
    model_1_batch = Model(inputs=input, outputs=outputs)
    model_1_batch.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
    model_1_batch.set_weights(weights=weights)
    scaler = pickle.load(open(filename_scaler, 'rb'))

    list_simi = []
    for ii in range(len(list_feature_teacher)):
        # Scale each feature matrix, add the batch dimension, and embed.
        x_batch_teacher = np.expand_dims(scaler.transform(list_feature_teacher[ii]), axis=0)
        x_batch_student = np.expand_dims(scaler.transform(list_feature_student[ii]), axis=0)
        embedding_teacher = model_1_batch.predict_on_batch(x_batch_teacher)
        embedding_student = model_1_batch.predict_on_batch(x_batch_student)
        # scipy's cosine() is a distance in [0, 2]; this maps it to a
        # similarity in [0, 1] (1 = identical direction).
        dist = (2.0 - cosine(embedding_teacher, embedding_student)) / 2.0
        list_simi.append(dist)
    return list_simi
Example 64
Project: hepaccelerate   Author: hepaccelerate   File: train_dnn.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def layer(din, n_units, do_dropout=False):
    """Dense -> LeakyReLU(0.2) block, optionally followed by Dropout(0.2)."""
    out = LeakyReLU(alpha=0.2)(Dense(n_units)(din))
    if do_dropout:
        out = Dropout(0.2)(out)
    return out
Example 65
Project: ppi_lstm_rnn_keras   Author: ylhsieh   File: train_keras.py    MIT License 5 votes vote down vote up
def main():
    """Run 10-fold cross-validation of a BiLSTM PPI classifier.

    Relies on module-level configuration (opt, hidden_size, dropout_rate,
    rnn_hidden_size, max_sequence_length, epochs, batch_size,
    pretrained_embeddings_file) and corpus files under ./corpus/.
    """

    def build_model():
        """Embedding -> BiLSTM -> 2-way softmax, compiled for each fold."""
        model = Sequential()
        model.add(Embedding(len(train_vocab), hidden_size, weights=[embedding_array],\
                            input_length=max_sequence_length))
        model.add(Dropout(dropout_rate))
        model.add(Bidirectional(LSTM(rnn_hidden_size)))
        model.add(Dropout(dropout_rate))
        model.add(Dense(2, activation='softmax'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        return model

    train_vocab = load_vocab_from(opt.data + '.vocab')
    embedding_array = load_pretrained_embeddings(train_vocab, pretrained_embeddings_file)
    for fold_id in range(10):
        # Fresh TF session per fold; cap GPU memory so folds can coexist.
        tfsession = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)))
        K.set_session(tfsession)
        train_file = 'corpus/{}_f{}_train.txt'.format(opt.data, fold_id)
        test_file = 'corpus/{}_f{}_test.txt'.format(opt.data, fold_id)
        log_file = '{}_f{}.log'.format(opt.data, fold_id)
        x_train, x_test, y_train, y_test, _ = read_corpus(train_file, test_file, train_vocab)
        fscore_cb = FscoreLogCallback(log_file)
        model = build_model()
        print("Fold {}".format(fold_id))
        model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, \
                  callbacks=[fscore_cb], verbose=2)
        # Argmax over the 2-way softmax gives the predicted label.
        predicted = np.argmax(model.predict(x_test), axis=1)
        y_test_to_label = np.argmax(y_test, axis=1)
        prec, reca, fscore, sup = precision_recall_fscore_support(y_test_to_label, predicted, average='binary')
        print("Final Precision:{:2.2f}% Recall:{:2.2f}% Fscore:{:2.2f}%".format(prec*100, reca*100, fscore*100))
Example 66
Project: Anamoly-Detection   Author: msmsk05   File: auto_encoder.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _build_model(self):
        """Build and compile the fully connected autoencoder.

        Uses instance configuration (hidden_neurons_, hidden_activation,
        output_activation, dropout_rate, l2_regularizer, loss, optimizer)
        set elsewhere on this estimator. Returns the compiled Sequential
        model; prints its summary as a side effect.
        """
        model = Sequential()
        # Input layer
        model.add(Dense(
            self.hidden_neurons_[0], activation=self.hidden_activation,
            input_shape=(self.n_features_,),
            activity_regularizer=l2(self.l2_regularizer)))
        model.add(Dropout(self.dropout_rate))

        # Additional layers
        # NOTE(review): enumerate(..., 1) only offsets the counter i; the
        # loop still starts at hidden_neurons_[0], so the first width is
        # added twice (once above, once here) -- confirm this is the
        # intended symmetric layout.
        for i, hidden_neurons in enumerate(self.hidden_neurons_, 1):
            model.add(Dense(
                hidden_neurons,
                activation=self.hidden_activation,
                activity_regularizer=l2(self.l2_regularizer)))
            model.add(Dropout(self.dropout_rate))

        # Output layers
        model.add(Dense(self.n_features_, activation=self.output_activation,
                        activity_regularizer=l2(self.l2_regularizer)))

        # Compile model
        model.compile(loss=self.loss, optimizer=self.optimizer)
        print(model.summary())
        return model

    # noinspection PyUnresolvedReferences 
Example 67
Project: Anamoly-Detection   Author: msmsk05   File: gaal_base.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def create_discriminator(latent_size, data_size):  # pragma: no cover
    """Create the discriminator of the GAN for a given latent size.

    A two-layer MLP: ceil(sqrt(data_size)) ReLU units followed by one
    sigmoid unit, both with fan-in variance-scaling initialisation.

    Parameters
    ----------
    latent_size : int
        The size of the latent space of the generator.

    data_size : int
        Size of the input data.

    Returns
    -------
    D : Keras model() object
        Returns a model() object.
    """

    def _fan_in_normal():
        # Fresh initializer per layer (matches one-instance-per-layer use).
        return keras.initializers.VarianceScaling(
            scale=1.0, mode='fan_in', distribution='normal', seed=None)

    dis = Sequential()
    dis.add(Dense(int(math.ceil(math.sqrt(data_size))),
                  input_dim=latent_size, activation='relu',
                  kernel_initializer=_fan_in_normal()))
    dis.add(Dense(1, activation='sigmoid',
                  kernel_initializer=_fan_in_normal()))

    data = Input(shape=(latent_size,))
    fake = dis(data)
    return Model(data, fake)
Example 68
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo3.py    MIT License 4 votes vote down vote up
def foo():
    """Build the little_foo3 CNN.

    Two convolutional stacks (32 then 64 filters, kernel sizes shrinking
    8 -> 6 -> 4 -> 2) with batch-norm + ReLU after every convolution,
    followed by a 220-unit dense head and a 2-way sigmoid output.
    ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    # Stack 1: 32 filters, kernels 8 -> 6 -> 4 -> 2.
    model.add(Convolution2D(32, 8, 8, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    for size, tag in ((6, 'conv1_2'), (4, 'conv1_3'), (2, 'conv1_4')):
        model.add(Convolution2D(32, size, size, init=weight_init, name=tag))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 208, out 104
    model.add(Dropout(dropout))

    # Stack 2: 64 filters, same shrinking-kernel pattern.
    for size, tag in ((8, 'conv2_1'), (6, 'conv2_2'), (4, 'conv2_3'),
                      (2, 'conv2_4')):
        model.add(Convolution2D(64, size, size, init=weight_init, name=tag))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 88, out is 44
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 69
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():
    """Build the foo_three CNN.

    Four convolutional stacks (32, 64, 128, 512 filters; the last stack
    uses 'same' border mode) with batch-norm + ReLU after every
    convolution, each stack ending in 2x2 max-pooling and dropout,
    followed by a 120-unit dense head and a 2-way sigmoid output.
    ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    model.add(Convolution2D(32, 3, 3, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    # BUGFIX: the original stacked two consecutive Dropout(dropout) layers
    # here, compounding the drop rate to 1 - (1 - dropout)**2; a single
    # dropout layer matches the sibling foo() variants in this project.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 70
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo.py    MIT License 4 votes vote down vote up
def foo():
    """Build the little_foo CNN.

    Three convolutional stacks (32, 64, 128 filters) with batch-norm +
    ReLU after every convolution, each ending in 2x2 max-pooling and
    dropout, followed by a 10-unit dense head and a 2-way sigmoid
    output. ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    # Stack 1: 32 filters, 5x5 opener then two 3x3 convolutions.
    model.add(Convolution2D(32, 5, 5, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    for tag in ('conv1_2', 'conv1_3'):
        model.add(Convolution2D(32, 3, 3, init=weight_init, name=tag))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 116, out 58
    model.add(Dropout(dropout))

    # Stacks 2 and 3: three 3x3 convolutions each, doubling the filters.
    for filters, prefix in ((64, 'conv2'), (128, 'conv3')):
        for idx in (1, 2, 3):
            model.add(Convolution2D(filters, 3, 3, init=weight_init,
                                    name='%s_%d' % (prefix, idx)))
            model.add(BatchNormalization())
            model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(10, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 71
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_two.py    MIT License 4 votes vote down vote up
def foo():
    """Build the foo_two CNN.

    Four convolutional stacks (16, 32, 64, 128 filters; the last stack
    uses 'same' border mode) with batch-norm + ReLU after every
    convolution, each stack ending in 2x2 max-pooling and dropout,
    followed by a 120-unit dense head and a 2-way sigmoid output.
    ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    model.add(Convolution2D(16, 3, 3, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(16, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    # BUGFIX: the original stacked two consecutive Dropout(dropout) layers
    # here, compounding the drop rate to 1 - (1 - dropout)**2; a single
    # dropout layer matches the sibling foo() variants in this project.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 72
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():
    """Build the foo_three CNN.

    Four convolutional stacks (32, 64, 128, 512 filters; the last stack
    uses 'same' border mode) with batch-norm + ReLU after every
    convolution, each stack ending in 2x2 max-pooling and dropout,
    followed by a 120-unit dense head and a 2-way sigmoid output.
    ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    model.add(Convolution2D(32, 3, 3, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same',
                            name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    # BUGFIX: the original stacked two consecutive Dropout(dropout) layers
    # here, compounding the drop rate to 1 - (1 - dropout)**2; a single
    # dropout layer matches the sibling foo() variants in this project.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 73
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo2.py    MIT License 4 votes vote down vote up
def foo():
    """Build the little_foo2 CNN.

    Two convolutional stacks (32 then 64 filters, kernel sizes shrinking
    7 -> 5 -> 3) with batch-norm + ReLU after every convolution,
    followed by a 220-unit dense head and a 2-way sigmoid output.
    ``weight_init`` and ``dropout`` come from module scope.
    Returns the uncompiled Sequential model.
    """
    # Use Theano-style channels-first ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    # Stack 1: 32 filters, kernels 7 -> 5 -> 3.
    model.add(Convolution2D(32, 7, 7, input_shape=input_shape,
                            init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    for size, tag in ((5, 'conv1_2'), (3, 'conv1_3')):
        model.add(Convolution2D(32, size, size, init=weight_init, name=tag))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 212, out 106
    model.add(Dropout(dropout))

    # Stack 2: 64 filters, same shrinking-kernel pattern.
    for size, tag in ((7, 'conv2_1'), (5, 'conv2_2'), (3, 'conv2_3')):
        model.add(Convolution2D(64, size, size, init=weight_init, name=tag))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 94, out is 47
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 74
Project: Scene-Understanding   Author: foamliu   File: vgg16.py    MIT License 4 votes vote down vote up
def vgg16_model(img_rows, img_cols, channel=3):
    """Build a VGG16 classifier and load ImageNet-pretrained weights.

    Standard VGG16 topology: five conv blocks (64, 128, 256, 512, 512
    filters; 2, 2, 3, 3, 3 convolutions), each conv preceded by 1-pixel
    zero padding and each block closed by 2x2 max-pooling, then two
    4096-unit dense layers and a 1000-way softmax. Weights are loaded
    from 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'.
    """
    model = Sequential()

    # Encoder: drive the five conv blocks from a (filters, n_convs) table.
    vgg_blocks = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    first_layer = True
    for block_idx, (filters, n_convs) in enumerate(vgg_blocks, start=1):
        for conv_idx in range(1, n_convs + 1):
            if first_layer:
                # The very first padding layer carries the input shape
                # and the 'input' name.
                model.add(ZeroPadding2D((1, 1),
                                        input_shape=(img_rows, img_cols,
                                                     channel),
                                        name='input'))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(filters, (3, 3), activation='relu',
                             name='conv%d_%d' % (block_idx, conv_idx)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Fully connected classifier head.
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='dense2'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax', name='softmax'))

    # Loads ImageNet pre-trained data
    weights_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    return model
Example 75
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model():
    """Build the LSTM-FCN style anomaly model.

    Two parallel branches over the same (MAX_NB_VARIABLES, MAX_TIMESTEPS)
    input -- a masked LSTM branch and a stacked Conv1D branch with
    squeeze-and-excite blocks -- are concatenated into an NB_CLASS-way
    softmax. Returns the uncompiled functional Model.
    """
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # Recurrent branch: mask padded steps, small LSTM, heavy dropout.
    lstm_branch = Masking()(ip)
    lstm_branch = LSTM(8)(lstm_branch)
    lstm_branch = Dropout(0.8)(lstm_branch)

    # Convolutional branch operates on (timesteps, variables); the first
    # two conv stages are followed by squeeze-and-excite blocks.
    conv_branch = Permute((2, 1))(ip)
    for n_filters, width, excite in ((128, 8, True), (256, 5, True),
                                     (128, 3, False)):
        conv_branch = Conv1D(n_filters, width, padding='same',
                             kernel_initializer='he_uniform')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
        if excite:
            conv_branch = squeeze_excite_block(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch)

    merged = concatenate([lstm_branch, conv_branch])
    out = Dense(NB_CLASS, activation='softmax')(merged)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example 76
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model_2():
    """Build the attention variant of the LSTM-FCN anomaly model.

    Identical to generate_model() except the recurrent branch uses
    AttentionLSTM instead of a plain LSTM. Returns the uncompiled
    functional Model.
    """
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    # Recurrent branch: mask padded steps, attention LSTM, heavy dropout.
    lstm_branch = Masking()(ip)
    lstm_branch = AttentionLSTM(8)(lstm_branch)
    lstm_branch = Dropout(0.8)(lstm_branch)

    # Convolutional branch operates on (timesteps, variables); the first
    # two conv stages are followed by squeeze-and-excite blocks.
    conv_branch = Permute((2, 1))(ip)
    for n_filters, width, excite in ((128, 8, True), (256, 5, True),
                                     (128, 3, False)):
        conv_branch = Conv1D(n_filters, width, padding='same',
                             kernel_initializer='he_uniform')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
        if excite:
            conv_branch = squeeze_excite_block(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch)

    merged = concatenate([lstm_branch, conv_branch])
    out = Dense(NB_CLASS, activation='softmax')(merged)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
Example 77
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 4 votes vote down vote up
def RNN_builder(num_output_features, num_decoder_features,
                target_sequence_length,
              num_steps_to_predict, regulariser,
              lr, decay, loss, layers):
    """Build a stacked-GRU decoder model with a sigmoid output layer.

    Parameters
    ----------
    num_output_features : int
        Number of features per timestep in the output.
    num_decoder_features : int
        Number of features per timestep fed to the decoder (sequence
        length is hard-coded to 37 below).
    regulariser : keras regularizer applied to kernel/recurrent/bias
        weights of every GRU cell and the output Dense layer.
    lr, decay : float
        Adam optimizer settings.
    layers : iterable of int
        Hidden sizes, one stacked GRUCell per entry.

    Returns
    -------
    rnn_model : uncompiled keras functional Model.

    NOTE(review): `target_sequence_length`, `num_steps_to_predict` and
    `loss` are accepted but never used, and `optimiser` below is created
    but never applied (the model is returned uncompiled) -- possibly a
    missing `rnn_model.compile(...)`; confirm with the caller.
    """

    optimiser = keras.optimizers.Adam(lr=lr, decay=decay)
    # Define a decoder sequence.
    decoder_inputs = keras.layers.Input(shape=(37, num_decoder_features), name='decoder_inputs')

    decoder_cells = []

    # One GRUCell per requested hidden size; all share the regulariser.
    for hidden_neurons in layers:
        print(hidden_neurons)
        decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                  kernel_regularizer = regulariser,
                                                  recurrent_regularizer = regulariser,
                                                  bias_regularizer = regulariser))

    print(decoder_cells)
    decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
    # Set the initial state of the decoder to be the ouput state of the encoder.
    decoder_outputs_and_states = decoder(decoder_inputs, initial_state=None)

    # Only select the output of the decoder (not the states)
    decoder_outputs = decoder_outputs_and_states[0]

    # Apply a dense layer with linear activation to set output to correct dimension
    # and scale (tanh is default activation for GRU in Keras, our output sine function can be larger then 1)
    
    #decoder_dense1 = keras.layers.Dense(units=64,
    #                                   activation='tanh',
    #                                   kernel_regularizer = regulariser,
    #                                   bias_regularizer = regulariser, name='dense_tanh')

    output_dense = keras.layers.Dense(num_output_features,
                                       activation='sigmoid',
                                       kernel_regularizer = regulariser,
                                       bias_regularizer = regulariser, name='output_sig')

    #densen1=decoder_dense1(decoder_outputs)
    decoder_outputs = output_dense(decoder_outputs)
    # Create a model using the functional API provided by Keras.
    rnn_model = keras.models.Model(inputs=[decoder_inputs], outputs=decoder_outputs)
    return rnn_model 
Example 78
Project: dac   Author: KBNLresearch   File: models.py    GNU General Public License v3.0 4 votes vote down vote up
def create_model(self):
    """Create a new keras model for entity/candidate matching.

    Three dense+dropout input branches (entity, candidate, match) are
    concatenated and funnelled through a 32-16-8 dense stack into a
    single sigmoid output. Also sets ``self.class_weight``.
    Returns the compiled Model.
    """
    self.class_weight = {0: 0.25, 1: 0.75}

    def _branch(tensor, units):
        # Shared branch shape: max-norm-constrained ReLU dense + 25% dropout.
        hidden = Dense(units, activation='relu',
                       kernel_constraint=maxnorm(3))(tensor)
        return Dropout(0.25)(hidden)

    # Entity branch
    entity_inputs = Input(shape=(self.data[0].shape[1],))
    entity_x = _branch(entity_inputs, self.data[0].shape[1])

    # Candidate branch
    candidate_inputs = Input(shape=(self.data[1].shape[1],))
    candidate_x = _branch(candidate_inputs, self.data[1].shape[1])

    # Match branch
    match_inputs = Input(shape=(self.data[2].shape[1],))
    # NOTE(review): the match branch width comes from data[1], not
    # data[2], exactly as in the original -- confirm this is intentional.
    match_x = _branch(match_inputs, self.data[1].shape[1])

    # Merge the branches and funnel 32 -> 16 -> 8 down to one score.
    x = concatenate([entity_x, candidate_x, match_x])
    for units in (32, 16, 8):
        x = Dense(units, activation='relu', kernel_constraint=maxnorm(3))(x)
        x = Dropout(0.25)(x)

    predictions = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[entity_inputs, candidate_inputs, match_inputs],
                  outputs=predictions)
    model.compile(optimizer='RMSprop', loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example 79
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def jan_original(filter_density, dropout, input_shape, batchNorm=False, dense_activation='relu', channel=1):
    """Build and compile the Jan-style onset-detection CNN.

    Two conv + (3, 1) max-pool stages whose filter counts scale with
    ``filter_density``, a 256-unit dense layer, and a single sigmoid
    output, compiled with Adam and binary cross-entropy. When
    ``channel == 1`` the input is treated as channels-first with a
    leading singleton channel; otherwise ``input_shape`` is used as-is
    in channels-last order. ``dropout`` (if truthy) is applied after
    the conv stages and after the dense layer.
    """
    if channel == 1:
        reshape_dim = (1, input_shape[0], input_shape[1])
        channel_order = 'channels_first'
    else:
        reshape_dim = input_shape
        channel_order = 'channels_last'

    net = Sequential()

    if batchNorm:
        net.add(BatchNormalization(axis=1, input_shape=reshape_dim))

    # Stage 1: wide (3, 7) kernels, then pool away frequency rows.
    net.add(Conv2D(int(10 * filter_density), (3, 7), padding="valid",
                   input_shape=reshape_dim,
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid',
                         data_format=channel_order))

    # Stage 2: double the filters with (3, 3) kernels.
    net.add(Conv2D(int(20 * filter_density), (3, 3), padding="valid",
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid',
                         data_format=channel_order))

    if dropout:
        net.add(Dropout(dropout))  # test Schluter dataset, comment in jingju dataset

    # Dense head with a single sigmoid output.
    net.add(Flatten())
    net.add(Dense(units=256, activation=dense_activation))
    if dropout:
        net.add(Dropout(dropout))
    net.add(Dense(1, activation='sigmoid'))

    optimizer = Adam()
    net.compile(loss='binary_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])

    print(net.summary())

    return net
Example 80
Project: phoneticSimilarity   Author: ronggong   File: cal_embeddigns_for_plotting.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def calculate_pronunciation_score(list_feature_teacher, list_feature_student):
    """
    Calculate pronunciation similarity

    Loads a trained phone-embedding classifier from hard-coded local
    paths, rebuilds it with an attention architecture sharing the same
    weights, then scores each teacher/student feature pair by the cosine
    similarity of their embeddings, rescaled to [0, 1].

    :param list_feature_teacher: per-phone feature matrices (teacher);
        must be transformable by the stored scaler
    :param list_feature_student: same length/shape as the teacher list
    :return: list of similarity scores in [0, 1], one per pair
    """
    # NOTE(review): machine-specific absolute paths -- this only runs on
    # the original author's machine unless these are parameterised.
    path_model = '/home/gong/Documents/pycharmProjects/phoneticSimilarity/models/phone_embedding_classifier'
    path_dataset = '/media/gong/ec990efa-9ee0-4693-984b-29372dcea0d1/Data/RongGong/phoneEmbedding'
    conv = True
    dropout = True
    config = [2, 0]
    # input_shape[1] is None: the rebuilt model accepts variable-length
    # phone sequences with 80 features per frame (presumably mel bands
    # -- confirm against the feature extractor).
    input_shape = [1, None, 80]
    embedding_dim = 27
    prefix = '_27_class'
    attention_dense_str = 'attention_conv_dropout_'
    model_name = config_select(config)
    filename_model = os.path.join(path_model, model_name + prefix + '_' + attention_dense_str + str(0) + '.h5')
    filename_scaler = os.path.join(path_dataset, 'scaler_phn_embedding_train_teacher_student.pkl')

    # loading models
    model = load_model(filepath=filename_model, custom_objects={'Attention': Attention(return_attention=True)})
    weights = model.get_weights()
    # Rebuild the architecture for batch prediction and transplant the
    # trained weights into it.
    x, input, _ = model_select_attention(config=config, input_shape=input_shape, conv=conv, dropout=dropout)
    outputs = Dense(embedding_dim, activation='softmax')(x)
    model_1_batch = Model(inputs=input, outputs=outputs)
    model_1_batch.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
    model_1_batch.set_weights(weights=weights)
    scaler = pickle.load(open(filename_scaler, 'rb'))

    # calculate the similarity
    # scipy's cosine() is a distance in [0, 2]; (2 - d) / 2 maps it to a
    # similarity in [0, 1], with 1 meaning identical embeddings.
    list_simi = []
    for ii in range(len(list_feature_teacher)):
        x_batch_teacher = np.expand_dims(scaler.transform(list_feature_teacher[ii]), axis=0)
        x_batch_student = np.expand_dims(scaler.transform(list_feature_student[ii]), axis=0)
        embedding_teacher = model_1_batch.predict_on_batch(x_batch_teacher)
        embedding_student = model_1_batch.predict_on_batch(x_batch_student)
        dist = (2.0 - cosine(embedding_teacher, embedding_student)) / 2.0
        list_simi.append(dist)
    return list_simi