Python keras.layers.GlobalMaxPooling1D() Examples
The following are 30 code examples of keras.layers.GlobalMaxPooling1D().
You may also want to check out all available functions/classes of the module keras.layers.
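
Before the examples, here is a minimal, self-contained sketch (written for this page, not taken from any of the projects below; the shapes are arbitrary) of what the layer does: GlobalMaxPooling1D collapses the time axis by taking the maximum of each feature over all timesteps, so a (batch, steps, features) input becomes a (batch, features) output.

import numpy as np
from keras.models import Sequential
from keras.layers import GlobalMaxPooling1D

# One-layer model: (batch, 10, 8) -> (batch, 8), max over the 10 timesteps.
model = Sequential([GlobalMaxPooling1D(input_shape=(10, 8))])

x = np.random.rand(2, 10, 8).astype('float32')
y = model.predict(x)
assert y.shape == (2, 8)
assert np.allclose(y, x.max(axis=1))  # identical to a plain max over axis 1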

Example #1
Source Project: semeval2017-scienceie Author: UKPLab File: convNet.py License: Apache License 2.0
def build_cnn(input_shape, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
Example #2
Source Project: SeqGAN Author: tyo-yo File: models.py License: MIT License
def VariousConv1D(x, filter_sizes, num_filters, name_prefix=''):
    '''
    Layer wrapper function for various filter sizes Conv1Ds
    # Arguments:
        x: tensor, shape = (B, T, E)
        filter_sizes: list of int, list of each Conv1D filter sizes
        num_filters: list of int, list of each Conv1D num of filters
        name_prefix: str, layer name prefix
    # Returns:
        out: tensor, shape = (B, sum(num_filters))
    '''
    conv_outputs = []
    for filter_size, n_filter in zip(filter_sizes, num_filters):
        conv_name = '{}VariousConv1D/Conv1D/filter_size_{}'.format(name_prefix, filter_size)
        pooling_name = '{}VariousConv1D/MaxPooling/filter_size_{}'.format(name_prefix, filter_size)
        conv_out = Conv1D(n_filter, filter_size, name=conv_name)(x)  # (B, time_steps, n_filter)
        conv_out = GlobalMaxPooling1D(name=pooling_name)(conv_out)  # (B, n_filter)
        conv_outputs.append(conv_out)
    concatenate_name = '{}VariousConv1D/Concatenate'.format(name_prefix)
    out = Concatenate(name=concatenate_name)(conv_outputs)
    return out
Example #3
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = SpatialDropout1D(self.dropout_spatial)(x)
    x = AttentionSelf(self.word_embedding.embed_size)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Example #4
Source Project: nlp_xiaojiang Author: yongzhuo File: keras_bert_classify_text_cnn.py License: MIT License
def build_model_text_cnn(self):
    #########  text-cnn  #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_out = []
    for index, filter_size in enumerate(self.filters):
        x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                   filters=int(self.embedding_dim / 2),
                   kernel_size=self.filters[index],
                   padding='valid',
                   kernel_initializer='normal',
                   activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        concat_out.append(x)
    x = Concatenate(axis=1)(concat_out)
    x = Dropout(self.keep_prob)(x)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example #5
Source Project: semeval2017-scienceie Author: UKPLab File: convNet.py License: Apache License 2.0
def build_cnn_char(input_dim, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,  # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
Example #6
Source Project: WeSTClass Author: yumeng5 File: model.py License: Apache License 2.0
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                  name="embedding", weights=[embedding_matrix], trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier')
Example #7
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRecRecommenderWrapper.py License: GNU Affero General Public License v3.0
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent,
                         path_attention_layer_1, path_attention_layer_2):
    conv_umtmum = Conv1D(filters=128,
                         kernel_size=4,
                         activation='relu',
                         kernel_regularizer=l2(0.0),
                         kernel_initializer='glorot_uniform',
                         padding='valid',
                         strides=1,
                         name='umtmum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umtmum_input)
    output = conv_umtmum(path_input)
    output = GlobalMaxPooling1D()(output)
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umtmum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtmum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'umtmum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #8
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRecRecommenderWrapper.py License: GNU Affero General Public License v3.0
def get_umtm_embedding(umtm_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umtm = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umtm_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umtm_input)
    output = GlobalMaxPooling1D()(conv_umtm(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umtm_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtm(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'umtm')
    output = GlobalMaxPooling1D()(output)
    return output
Example #9
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRecRecommenderWrapper.py License: GNU Affero General Public License v3.0
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'umum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #10
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRecRecommenderWrapper.py License: GNU Affero General Public License v3.0
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='uuum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'uuum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #11
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRec.py License: GNU Affero General Public License v3.0
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent,
                         path_attention_layer_1, path_attention_layer_2):
    conv_umtmum = Conv1D(filters=128,
                         kernel_size=4,
                         activation='relu',
                         kernel_regularizer=l2(0.0),
                         kernel_initializer='glorot_uniform',
                         padding='valid',
                         strides=1,
                         name='umtmum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umtmum_input)
    output = conv_umtmum(path_input)
    output = GlobalMaxPooling1D()(output)
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umtmum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtmum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'umtmum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #12
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRec.py License: GNU Affero General Public License v3.0
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'umum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #13
Source Project: RecSys2019_DeepLearning_Evaluation Author: MaurizioFD File: MCRec.py License: GNU Affero General Public License v3.0
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='uuum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    # output = path_attention(user_latent, item_latent, output, 128, 64,
    #                         path_attention_layer_1, path_attention_layer_2, 'uuum')
    output = GlobalMaxPooling1D()(output)
    return output
Example #14
Source Project: WeSHClass Author: yumeng5 File: models.py License: Apache License 2.0
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y)
Example #15
Source Project: Deep-Learning-Quick-Reference Author: PacktPublishing File: newsgroup_classifier_pretrained_word_embeddings.py License: MIT License
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                weights=[embedding_matrix],
                                input_length=sequence_length,
                                trainable=False,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #16
Source Project: Deep-Learning-Quick-Reference Author: PacktPublishing File: newsgroup_classifier_word_embeddings.py License: MIT License
def build_model(vocab_size, embedding_dim, sequence_length):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                input_length=sequence_length,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #17
Source Project: toxic_comments Author: Donskov7 File: models.py License: MIT License
def cnn(embedding_matrix, char_matrix, num_classes, max_seq_len, max_ll3_seq_len,
        num_filters=64, l2_weight_decay=0.0001, dropout_val=0.5, dense_dim=32,
        add_sigmoid=True, train_embeds=False, gpus=0, n_cnn_layers=1, pool='max',
        add_embeds=False):
    if pool == 'max':
        Pooling = MaxPooling1D
        GlobalPooling = GlobalMaxPooling1D
    elif pool == 'avg':
        Pooling = AveragePooling1D
        GlobalPooling = GlobalAveragePooling1D
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    x = embeds
    for i in range(n_cnn_layers - 1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
        x = Pooling(2)(x)
    x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = GlobalPooling()(x)
    if add_embeds:
        x1 = Conv1D(num_filters, 7, activation='relu', padding='same')(embeds)
        x1 = GlobalPooling()(x1)
        x = Concatenate()([x, x1])
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    x = Dense(dense_dim, activation='relu', kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        x = Dense(num_classes, activation='sigmoid')(x)
    model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
Example #18
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the CLS token
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max)
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Example #19
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output

    def win_mean(x):
        res_list = []
        for i in range(self.len_max - self.n_win + 1):
            x_mean = tf.reduce_mean(x[:, i:i + self.n_win, :], axis=1)
            x_mean_dims = tf.expand_dims(x_mean, axis=-1)
            res_list.append(x_mean_dims)
        res_list = tf.concat(res_list, axis=-1)
        gg = tf.reduce_max(res_list, axis=-1)
        return gg

    if self.encode_type == "HIERARCHICAL":
        x = Lambda(win_mean, output_shape=(self.embed_size,))(embedding)
    elif self.encode_type == "MAX":
        x = GlobalMaxPooling1D()(embedding)
    elif self.encode_type == "AVG":
        x = GlobalAveragePooling1D()(embedding)
    elif self.encode_type == "CONCAT":
        x_max = GlobalMaxPooling1D()(embedding)
        x_avg = GlobalAveragePooling1D()(embedding)
        x = Concatenate()([x_max, x_avg])
    else:
        raise RuntimeError("encode_type must be 'MAX', 'AVG', 'CONCAT', 'HIERARCHICAL'")
    output = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(132)
Example #20
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output
    x = GlobalMaxPooling1D()(embedding)
    output = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(132)
Example #21
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    # x = embedding_output
    x = Lambda(lambda x: x[:, -2:-1, :])(embedding_output)  # take the CLS token
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max)
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Example #22
Source Project: nlp_xiaojiang Author: yongzhuo File: keras_bert_classify_text_cnn.py License: MIT License
def build_model_r_cnn(self):
    #########  RCNN  #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # rcnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    if args.use_lstm:
        if args.use_cudnn_cell:
            layer_cell = CuDNNLSTM
        else:
            layer_cell = LSTM
    else:
        if args.use_cudnn_cell:
            layer_cell = CuDNNGRU
        else:
            layer_cell = GRU
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences,
                                 kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                 recurrent_regularizer=regularizers.l2(args.l2)))(bert_output_emmbed)
    x = Dropout(args.keep_prob)(x)
    x = Conv1D(filters=int(self.embedding_dim / 2),
               kernel_size=2,
               padding='valid',
               kernel_initializer='normal',
               activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(args.keep_prob)(x)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example #23
Source Project: nlp_xiaojiang Author: yongzhuo File: keras_bert_classify_text_cnn.py License: MIT License
def build_model_avt_cnn(self):
    #########  text-cnn  #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_x = []
    concat_y = []
    concat_z = []
    for index, filter_size in enumerate(self.filters):
        conv = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                      filters=int(self.embedding_dim / 2),
                      kernel_size=self.filters[index],
                      padding='valid',
                      kernel_initializer='normal',
                      activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPooling1D_{}'.format(index))(conv)
        y = GlobalAveragePooling1D(name='TextCNN_AveragePooling1D_{}'.format(index))(conv)
        z = AttentionWeightedAverage(name='TextCNN_Annention_{}'.format(index))(conv)
        concat_x.append(x)
        concat_y.append(y)
        concat_z.append(z)
    merge_x = Concatenate(axis=1)(concat_x)
    merge_y = Concatenate(axis=1)(concat_y)
    merge_z = Concatenate(axis=1)(concat_z)
    merge_xyz = Concatenate(axis=1)([merge_x, merge_y, merge_z])
    x = Dropout(self.keep_prob)(merge_xyz)
    # finally the softmax
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example #24
Source Project: nlp_toolkit Author: stevewyl File: dpcnn.py License: MIT License
def forward(self):
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    # region embedding
    x = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                        self.token_embeddings, False, self.maxlen,
                        self.embed_dropout_rate, name='token_embeddings')
    if isinstance(self.region_kernel_size, list):
        region = [Conv1D(self.nb_filters, f, padding='same')(x)
                  for f in self.region_kernel_size]
        region_embedding = add(region, name='region_embeddings')
    else:
        region_embedding = Conv1D(self.nb_filters, self.region_kernel_size,
                                  padding='same', name='region_embeddings')(x)
    # same padding convolution
    x = Activation('relu')(region_embedding)
    x = Conv1D(self.nb_filters, self.conv_kernel_size,
               padding='same', name='conv_1')(x)
    x = Activation('relu')(x)
    x = Conv1D(self.nb_filters, self.conv_kernel_size,
               padding='same', name='conv_2')(x)
    # residual connection
    x = add([x, region_embedding], name='pre_block_hidden')
    for k in range(self.repeat_time):
        x = self._block(x, k)
    x = GlobalMaxPooling1D()(x)
    outputs = tc_output_logits(x, self.nb_classes, self.final_dropout_rate)
    self.model = Model(inputs=model_input, outputs=outputs,
                       name="Deep Pyramid CNN")
Example #25
Source Project: Sarcasm-Detection Author: MirunaPislar File: dl_models.py License: MIT License
def cnn_model(**kwargs):
    X = Conv1D(filters=kwargs['hidden_units'],
               kernel_size=3,
               kernel_initializer='he_normal',
               padding='valid',
               activation='relu')(kwargs['embeddings'])
    X = Conv1D(filters=kwargs['hidden_units'],
               kernel_size=3,
               kernel_initializer='he_normal',
               padding='valid',
               activation='relu')(X)
    X = GlobalMaxPooling1D()(X)
    # X = MaxPooling1D(pool_size=3)(X)  # an alternative to global max pooling
    # X = Flatten()(X)
    return X

# A model using Long Short Term Memory (LSTM) Units
Example #26
Source Project: Neural-Headline-Generator-CN Author: QuantumLiu File: models.py License: GNU General Public License v3.0
def c2r(dic_len, input_length, output_length, emb_dim=128, hidden=512,
        nb_filter=64, deepth=(1, 1), stride=3):
    model = Sequential()
    model.add(Embedding(input_dim=dic_len, output_dim=emb_dim, input_length=input_length))
    for l in range(deepth[0]):  # encoder depth
        model.add(Conv1D(nb_filter, 3, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(RepeatVector(output_length))
    for l in range(deepth[1]):  # decoder depth
        model.add(LSTM(hidden, return_sequences=True))
    model.add(TimeDistributed(Dense(units=dic_len, activation='softmax')))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
    return model
Example #27
Source Project: DeepConv-DTI Author: GIST-CSBL File: DeepConvDTI.py License: GNU General Public License v3.0
def PLayer(self, size, filters, activation, initializer, regularizer_param):
    def f(input):
        # model_p = Convolution1D(filters=filters, kernel_size=size, padding='valid',
        #                         activity_regularizer=l2(regularizer_param),
        #                         kernel_initializer=initializer,
        #                         kernel_regularizer=l2(regularizer_param))(input)
        model_p = Convolution1D(filters=filters,
                                kernel_size=size,
                                padding='same',
                                kernel_initializer=initializer,
                                kernel_regularizer=l2(regularizer_param))(input)
        model_p = BatchNormalization()(model_p)
        model_p = Activation(activation)(model_p)
        return GlobalMaxPooling1D()(model_p)
    return f
Example #28
Source Project: kopt Author: Avsecz File: model.py License: MIT License
def build_model(train_data, max_features=5000, maxlen=400, batch_size=32,
                embedding_dims=50, filters=250, kernel_size=3, hidden_dims=250):
    print('Build model...')
    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(0.2))
    # we add a Convolution1D, which will learn filters
    # word group filters of size filter_length:
    model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
    # we use max pooling:
    model.add(GlobalMaxPooling1D())
    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# model.fit(x_train, y_train,
#           batch_size=batch_size,
#           epochs=epochs,
#           validation_data=(x_test, y_test))
Example #29
Source Project: ccg2lambda Author: mynlp File: graph_emb.py License: Apache License 2.0
def make_child_parent_branch(token_emb, max_nodes, max_bi_relations):
    node_indices = Input(shape=(max_nodes,), dtype='int32', name='node_inds')
    graph_node_embs = token_emb(node_indices)
    child_rel_outputs, child_rel_inputs = make_pair_branch(
        graph_node_embs, max_nodes, max_bi_relations, label='child')
    parent_rel_outputs, parent_rel_inputs = make_pair_branch(
        graph_node_embs, max_nodes, max_bi_relations, label='parent')
    x = Add(name='child_parent_add')(child_rel_outputs + parent_rel_outputs)
    # Integrate node embeddings into a single graph embedding.
    x = GlobalMaxPooling1D()(x)
    outputs = [x]
    inputs = [node_indices] + child_rel_inputs + parent_rel_inputs
    return outputs, inputs
Example #30
Source Project: awesome-text-classification Author: Hironsan File: model.py License: MIT License
def build(self):
    sequence_input = Input(shape=(self.max_sequence_length,), dtype='int32')
    if self.weights is None:
        embedding = Embedding(
            self.vocab_size + 1,  # due to mask_zero
            self.embedding_dim,
            input_length=self.max_sequence_length,
        )(sequence_input)
    else:
        embedding = Embedding(
            self.weights.shape[0],  # due to mask_zero
            self.weights.shape[1],
            input_length=self.max_sequence_length,
            weights=[self.weights],
        )(sequence_input)
    convs = []
    for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
        conv = Conv1D(filters=num_filter,
                      kernel_size=filter_size,
                      activation='relu')(embedding)
        pool = GlobalMaxPooling1D()(conv)
        convs.append(pool)
    z = Concatenate()(convs)
    z = Dense(self.num_units)(z)
    z = Dropout(self.keep_prob)(z)
    z = Activation('relu')(z)
    pred = Dense(self.num_tags, activation='softmax')(z)
    model = Model(inputs=[sequence_input], outputs=[pred])
    return model