Python keras.layers.MaxPooling1D() Examples
The following are 30 code examples of keras.layers.MaxPooling1D().
You may also want to check out all available functions and classes of the keras.layers module.
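Before the examples, a minimal standalone sketch of what the layer does (the sizes here are arbitrary): MaxPooling1D slides a window along the time axis and keeps the per-channel maximum, so with the default stride equal to pool_size it shrinks the sequence length by that factor.

from keras.models import Sequential
from keras.layers import MaxPooling1D

model = Sequential()
# 100 timesteps of 8 features; pool_size=2 (stride defaults to pool_size) halves the time axis.
model.add(MaxPooling1D(pool_size=2, input_shape=(100, 8)))
print(model.output_shape)  # (None, 50, 8)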
Example #1
Source File: recurrent.py From keras-anomaly-detection with MIT License
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))

    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model
Example #2
Source File: models.py From delft with Apache License 2.0
def cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units)(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #3
Source File: dna.py From deepcpg with MIT License
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(self.l1_decay, self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
Example #4
Source File: models.py From delft with Apache License 2.0
def conv(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    filter_kernels = [7, 7, 5, 5, 3, 3]
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    # Keras 2 argument names (filters/kernel_size/padding/pool_size) replace the
    # deprecated Keras 1 names (nb_filter/filter_length/border_mode/pool_length)
    # used in the original file.
    conv = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[0],
                  padding='valid', activation='relu')(input_layer)
    conv = MaxPooling1D(pool_size=3)(conv)
    conv1 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[1],
                   padding='valid', activation='relu')(conv)
    conv1 = MaxPooling1D(pool_size=3)(conv1)
    conv2 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[2],
                   padding='valid', activation='relu')(conv1)
    conv3 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[3],
                   padding='valid', activation='relu')(conv2)
    conv4 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[4],
                   padding='valid', activation='relu')(conv3)
    conv5 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[5],
                   padding='valid', activation='relu')(conv4)
    conv5 = MaxPooling1D(pool_size=3)(conv5)
    conv5 = Flatten()(conv5)
    z = Dropout(0.5)(Dense(dense_size, activation='relu')(conv5))
    #x = GlobalMaxPool1D()(x)
    x = Dense(nb_classes, activation="sigmoid")(z)

    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# LSTM + conv
Example #5
Source File: main.py From wdcnn_bearning_fault_diagnosis with MIT License
def wdcnn(filters, kernel_size, strides, conv_padding, pool_padding, pool_size, BatchNormal):
    """One WDCNN convolution/pooling block, appended to the module-level `model`.

    :param filters: number of convolution kernels, int
    :param kernel_size: convolution kernel size, int
    :param strides: stride, int
    :param conv_padding: 'same' or 'valid'
    :param pool_padding: 'same' or 'valid'
    :param pool_size: pooling kernel size, int
    :param BatchNormal: whether to apply batch normalization, bool
    :return: model
    """
    model.add(Conv1D(filters=filters, kernel_size=kernel_size, strides=strides,
                     padding=conv_padding, kernel_regularizer=l2(1e-4)))
    if BatchNormal:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))
    return model

# Instantiate the Sequential model
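Since wdcnn() appends to a module-level Sequential rather than building its own network, that model must exist before the first call. A hedged usage sketch, assuming it runs in the same module as wdcnn and using made-up layer sizes (the real script's configuration may differ):

from keras.models import Sequential
from keras.layers import InputLayer

model = Sequential()
model.add(InputLayer(input_shape=(2048, 1)))  # assumed signal length and channel count
# Wide first kernel with a large stride, then a narrow one; each block pools by 2.
wdcnn(filters=16, kernel_size=64, strides=16, conv_padding='same',
      pool_padding='valid', pool_size=2, BatchNormal=True)
wdcnn(filters=32, kernel_size=3, strides=1, conv_padding='same',
      pool_padding='valid', pool_size=2, BatchNormal=True)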
Example #6
Source File: newsgroup_classifier_word_embeddings.py From Deep-Learning-Quick-Reference with MIT License
def build_model(vocab_size, embedding_dim, sequence_length):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                input_length=sequence_length,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #7
Source File: newsgroup_classifier_pretrained_word_embeddings.py From Deep-Learning-Quick-Reference with MIT License
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                weights=[embedding_matrix],
                                input_length=sequence_length,
                                trainable=False,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #8
Source File: transfer_learning.py From hyperspectral_deeplearning_review with GNU General Public License v3.0
def get_model_compiled(args, inputshape, num_class):
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, 24, activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D":
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy, optimizer=Adam(args.lr1), metrics=['accuracy'])
    return model
Example #9
Source File: models.py From delft with Apache License 2.0
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #10
Source File: graph.py From Keras-TextClassification with MIT License
def downsampling(inputs, pool_type='max'):
    """
    In addition, downsampling with stride 2 essentially doubles the effective
    coverage (i.e., coverage in the original document) of the convolution kernel;
    therefore, after going through downsampling L times, associations among words
    within a distance in the order of 2^L can be represented. Thus, deep pyramid
    CNN is computationally efficient for representing long-range associations
    and so more global information.
    Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py
    :param inputs: tensor
    :param pool_type: str, select 'max', 'k-max' or 'conv'
    :return: tensor
    """
    if pool_type == 'max':
        output = MaxPooling1D(pool_size=3, strides=2, padding='same')(inputs)
    elif pool_type == 'k-max':
        output = k_max_pooling(top_k=int(K.int_shape(inputs)[1] / 2))(inputs)
    elif pool_type == 'conv':
        # The original call omitted the mandatory `filters` argument; keeping the
        # input's channel count is one reasonable choice (an assumption here).
        output = Conv1D(filters=K.int_shape(inputs)[-1], kernel_size=3,
                        strides=2, padding='same')(inputs)
    else:
        output = MaxPooling1D(pool_size=3, strides=2, padding='same')(inputs)
    return output
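The docstring's claim is easy to verify numerically: each stride-2 downsampling roughly halves the sequence, so after L rounds a kernel position covers on the order of 2^L original tokens. A small shape check for the 'max' branch (the 8-step, 16-channel input is arbitrary):

from keras import backend as K
from keras.layers import Input, MaxPooling1D

x = Input(shape=(8, 16))
y = MaxPooling1D(pool_size=3, strides=2, padding='same')(x)   # one downsampling
z = MaxPooling1D(pool_size=3, strides=2, padding='same')(y)   # two downsamplings
print(K.int_shape(y), K.int_shape(z))  # (None, 4, 16) (None, 2, 16)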
Example #11
Source File: models.py From delft with Apache License 2.0
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #12
Source File: models.py From delft with Apache License 2.0
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# bidirectional LSTM with attention layer
Example #13
Source File: iscx2012_cnn_rnn_5class.py From DeepTraffic with Mozilla Public License 2.0
def byte_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    block = in_layer
    for i in range(len(nb_filter)):
        block = Conv1D(filters=nb_filter[i],
                       kernel_size=filter_length[i],
                       padding='valid',
                       activation='tanh',
                       strides=subsample[i])(block)
        # block = BatchNormalization()(block)
        # block = Dropout(0.1)(block)
        if pool_length[i]:
            block = MaxPooling1D(pool_size=pool_length[i])(block)
    # block = Lambda(max_1d, output_shape=(nb_filter[-1],))(block)
    block = GlobalMaxPool1D()(block)
    block = Dense(128, activation='relu')(block)
    return block
Example #14
Source File: dna.py From deepcpg with MIT License
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(256, 7,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    gru = kl.recurrent.GRU(256, kernel_regularizer=kernel_regularizer)
    x = kl.Bidirectional(gru)(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
Example #15
Source File: test_views.py From Fabrik with GNU General Public License v3.0
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'],
           'l3': net['Pooling']}
    # Pool 1D
    net['l1']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l1']
    net['l3']['params']['layer_type'] = '1D'
    net['l3']['shape']['input'] = net['l1']['shape']['output']
    net['l3']['shape']['output'] = [12, 12]
    inp = data(net['l1'], '', 'l1')['l1']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
    # Pool 2D
    net['l0']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l0']
    net['l3']['params']['layer_type'] = '2D'
    net['l3']['shape']['input'] = net['l0']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226]
    inp = data(net['l0'], '', 'l0')['l0']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
    # Pool 3D
    net['l2']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l2']
    net['l3']['params']['layer_type'] = '3D'
    net['l3']['shape']['input'] = net['l2']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226, 18]
    inp = data(net['l2'], '', 'l2')['l2']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')

# ********** Locally-connected Layers **********
Example #16
Source File: dna.py From deepcpg with MIT License
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11, name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], atrous=2, stage=1, block=2)
    x = self._res_unit(x, [32, 32, 128], atrous=4, stage=1, block=3)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
    x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
    x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
Example #17
Source File: cnn_lstm.py From keras-malicious-url-detector with MIT License
def make_cnn_lstm_model(num_input_tokens, max_len):
    model = Sequential()
    model.add(Embedding(input_dim=num_input_tokens, input_length=max_len,
                        output_dim=EMBEDDING_SIZE))
    model.add(SpatialDropout1D(0.2))
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(NB_LSTM_CELLS))
    model.add(Dense(units=2, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example #18
Source File: cnn_lstm.py From keras-english-resume-parser-and-analyzer with MIT License
def create_model(self):
    lstm_output_size = 70
    embedding_size = 100
    self.model = Sequential()
    self.model.add(Embedding(input_dim=self.vocab_size, input_length=self.max_len,
                             output_dim=embedding_size))
    self.model.add(SpatialDropout1D(0.2))
    self.model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
    self.model.add(MaxPooling1D(pool_size=4))
    self.model.add(LSTM(lstm_output_size))
    self.model.add(Dense(units=len(self.labels), activation='softmax'))
    self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Example #19
Source File: test_views.py From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
    # Global Pooling 1D
    model = Sequential()
    model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Global Pooling 2D
    model = Sequential()
    model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 1D
    model = Sequential()
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Pooling 2D
    model = Sequential()
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                           input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 3D
    model = Sequential()
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                           input_shape=(16, 16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 11)

# ********** Locally-connected Layers **********
Example #20
Source File: model.py From ProteinSecondaryStructure-CNN with MIT License
def CNN_model():
    # We fix the window size to 11 because the average length of an alpha helix is
    # around eleven residues and that of a beta strand is around six.
    # ref: https://www.researchgate.net/publication/285648102_Protein_Secondary_Structure_Prediction_Using_Deep_Convolutional_Neural_Fields
    m = Sequential()
    m.add(Conv1D(128, 11, padding='same', activation='relu',
                 input_shape=(dataset.sequence_len, dataset.amino_acid_residues)))  # <----
    # m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    m.add(Dropout(drop_out))  # <----
    m.add(Conv1D(64, 11, padding='same', activation='relu'))  # <----
    # m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    m.add(Dropout(drop_out))  # <----
    # m.add(Conv1D(22, 11, padding='same', activation='relu'))
    # m.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    # m.add(Dropout(drop_out))
    m.add(Conv1D(dataset.num_classes, 11, padding='same', activation='softmax'))  # <----
    # m.add(Conv1D(dataset.num_classes, 11, padding='same'))
    # m.add(TimeDistributed(Activation('softmax')))
    # m.add(Conv1D(dataset.num_classes, 11, padding='same', activation='softmax',
    #              input_shape=(dataset.sequence_len, dataset.amino_acid_residues)))
    opt = optimizers.Adam(lr=LR)
    m.compile(optimizer=opt, loss=loss, metrics=['accuracy', 'mae'])
    if do_summary:
        print("\nHyper Parameters\n")
        print("Learning Rate: " + str(LR))
        print("Drop out: " + str(drop_out))
        print("Batch dim: " + str(batch_dim))
        print("Number of epochs: " + str(nn_epochs))
        print("\nLoss: " + loss + "\n")
        m.summary()
    return m
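Because every Conv1D above uses padding='same' and the MaxPooling1D calls are commented out, the network emits one class distribution per residue and preserves the sequence length end to end. A minimal shape check with stand-in sizes (700 residues, 22 input features, and 8 classes are assumptions, not values from the project):

from keras.models import Sequential
from keras.layers import Conv1D

m = Sequential()
# Stand-ins for dataset.sequence_len, dataset.amino_acid_residues, dataset.num_classes.
m.add(Conv1D(128, 11, padding='same', activation='relu', input_shape=(700, 22)))
m.add(Conv1D(8, 11, padding='same', activation='softmax'))
print(m.output_shape)  # (None, 700, 8): one softmax per residue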
Example #21
Source File: model.py From ProteinSecondaryStructure-CNN with MIT License
def CNN_model():
    m = Sequential()
    m.add(Conv1D(128, 5, padding='same', activation='relu',
                 input_shape=(dataset.cnn_width, dataset.amino_acid_residues)))
    m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2))
    m.add(Dropout(drop_out))
    m.add(Conv1D(128, 3, padding='same', activation='relu'))
    m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2))
    m.add(Dropout(drop_out))
    m.add(Conv1D(64, 3, padding='same', activation='relu'))
    m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2))
    m.add(Dropout(drop_out))
    # m.add(Conv1D(32, 3, padding='same', activation='relu'))
    # m.add(BatchNormalization())
    # m.add(MaxPooling1D(pool_size=2))
    # m.add(Dropout(drop_out))
    m.add(Flatten())
    m.add(Dense(128, activation='relu'))
    m.add(Dense(32, activation='relu'))
    m.add(Dense(dataset.num_classes, activation='softmax'))
    opt = optimizers.Adam(lr=LR)
    m.compile(optimizer=opt, loss=loss, metrics=['accuracy', 'mae'])
    if do_summary:
        print("\nHyper Parameters\n")
        print("Learning Rate: " + str(LR))
        print("Drop out: " + str(drop_out))
        print("Batch dim: " + str(batch_dim))
        print("Number of epochs: " + str(nn_epochs))
        print("\nLoss: " + loss + "\n")
        m.summary()
    return m
Example #22
Source File: dna.py From deepcpg with MIT License
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11, name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], stage=1, block=2)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], stage=2, block=2)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], stage=3, block=2)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
Example #23
Source File: model.py From polyaxon with Apache License 2.0
def train(experiment, max_features, maxlen, embedding_size, kernel_size, optimizer,
          filters, pool_size, lstm_output_size, log_learning_rate, batch_size, epochs):
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout(0.25))
    model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(OPTIMIZERS[optimizer](lr=10 ** log_learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[PolyaxonKerasCallback(run=experiment)])

    score, accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
    return score, accuracy
Example #24
Source File: architecture_pooling.py From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2MP_1FC256_GAP_f17_9_5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)

    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled

    # Define the input placeholder.
    X_input = Input(input_shape)

    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=256, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)

    #-- Global average pooling + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)

    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))

    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2MP_1FC256_GAP_f17_9_5fd')

#-----------------------------------------------------------------------
Example #25
Source File: test_tf_keras_layers.py From tf-coreml with Apache License 2.0
def test_max_pooling_1d(self):
    model = Sequential()
    model.add(MaxPooling1D(input_shape=(16, 3), pool_size=4))
    self._test_keras_model(model, has_variables=False)
Example #26
Source File: models.py From neurowriter with MIT License
def create(inputtokens, vocabsize, convlayers=5, kernels=32, convdrop=0.1,
           denselayers=0, denseunits=64, densedrop=0.1, embedding=32):
    kernel_size = 2
    pool_size = 2

    if convlayers < 1:
        raise ValueError("Number of layers must be at least 1")

    model = Sequential()
    # Embedding layer
    model.add(Embedding(input_dim=vocabsize, output_dim=embedding,
                        input_length=inputtokens))
    # First conv+pool layer
    model.add(Conv1D(kernels, kernel_size, padding='causal', activation='relu'))
    model.add(Dropout(convdrop))
    model.add(MaxPooling1D(pool_size))
    # Additional dilated conv + pool layers (if possible)
    for i in range(1, convlayers):
        try:
            model.add(Conv1D(kernels, kernel_size, padding='causal',
                             dilation_rate=2**i, activation='relu'))
            model.add(Dropout(convdrop))
            model.add(MaxPooling1D(pool_size))
        except Exception:
            print("Warning: not possible to add %i-th layer, moving to output" % i)
            break
    # Flatten and dense layers
    model.add(Flatten())
    for i in range(denselayers):
        model.add(Dense(denseunits, activation='relu'))
        model.add(Dropout(densedrop))
    # Output layer
    model.add(Dense(vocabsize, activation='softmax'))
    return model
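Each additional layer doubles the dilation rate while the pooling halves the sequence, so deep settings can run out of timesteps on short inputs; the try/except above simply stops stacking when that happens. A hedged usage sketch (vocabulary size, input length, and layer count are made-up values):

model = create(inputtokens=128, vocabsize=500, convlayers=3)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()  # three conv+pool stages with dilation rates 1, 2, 4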
Example #27
Source File: architecture_pooling.py From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2MP_1FC256_GAP_f3_1_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)

    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled

    # Define the input placeholder.
    X_input = Input(input_shape)

    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)

    #-- Global average pooling + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)

    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))

    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2MP_1FC256_GAP_f3_1_1fd')

#-----------------------------------------------------------------------
Example #28
Source File: architecture_pooling.py From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2MP_1FC256_GAP_f5_3_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)

    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled

    # Define the input placeholder.
    X_input = Input(input_shape)

    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    #~ X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)

    #-- Global average pooling + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)

    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))

    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2MP_1FC256_GAP_f5_3_1fd')

#-----------------------------------------------------------------------
Example #29
Source File: architecture_pooling.py From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2MP_1FC256_GAP_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)

    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled

    # Define the input placeholder.
    X_input = Input(input_shape)

    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)

    #-- Global average pooling + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)

    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))

    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2MP_1FC256_GAP_f9_5_3fd')

#-----------------------------------------------------------------------
Example #30
Source File: architecture_pooling.py From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2MP_1FC256_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)

    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled

    # Define the input placeholder.
    X_input = Input(input_shape)

    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv * 2**1, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv * 2**2, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)

    #-- Flatten + 1 FC layer
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)

    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))

    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2MP_1FC256_f9_5_3fd')

#-----------------------------------------------------------------------