Python keras.layers.Convolution1D() Examples
The following are 18 code examples of keras.layers.Convolution1D(). Each example lists its source project, author, file, and license so you can follow it back to the original code. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
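Note that these examples span two generations of the Keras API: older projects use Keras 1.x argument names (nb_filter, filter_length, border_mode, subsample_length), while newer ones use the Keras 2.x equivalents (filters, kernel_size, padding, strides). As a minimal sketch of the layer itself, assuming Keras 2.x (where Convolution1D is an alias for Conv1D), with all shapes chosen purely for illustration:

import numpy as np
from keras.models import Sequential
from keras.layers import Convolution1D, GlobalMaxPooling1D, Dense

# toy setup: 100-step sequences with 16 features per step
model = Sequential()
model.add(Convolution1D(filters=32, kernel_size=3, padding='valid',
                        activation='relu', input_shape=(100, 16)))
model.add(GlobalMaxPooling1D())              # (None, 98, 32) -> (None, 32)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.predict(np.random.rand(2, 100, 16))    # smoke test on random data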

Example #1
Source Project: semeval2017-scienceie | Author: UKPLab | File: convNet.py | License: Apache License 2.0
def build_cnn(input_shape, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
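A hypothetical call, for illustration only: with this Keras 1.x signature, input_shape is (sequence_length, feature_dim), so a 3-class model over 50-step sequences of 300-d vectors could be trained like this (all shapes and the random data are assumptions, not from the original project):

import numpy as np

X_train = np.random.rand(32, 50, 300)             # 32 sequences, 50 steps, 300-d features
y_train = np.eye(3)[np.random.randint(0, 3, 32)]  # one-hot labels for 3 classes
clf = build_cnn(input_shape=(50, 300), output_dim=3, nb_filter=100)
clf.fit(X_train, y_train, batch_size=32, nb_epoch=1)  # Keras 1.x fit API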
Example #2
Source Project: semeval2017-scienceie | Author: UKPLab | File: convNet.py | License: Apache License 2.0
def build_cnn_char(input_dim, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,  # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
Example #3
Source Project: deep_qa | Author: allenai | File: convolutional_encoder.py | License: Apache License 2.0
def build(self, input_shape):
    # We define convolution, maxpooling and dense layers first.
    self.convolution_layers = [Convolution1D(filters=self.num_filters,
                                             kernel_size=ngram_size,
                                             activation=self.conv_layer_activation,
                                             kernel_regularizer=self.regularizer(),
                                             bias_regularizer=self.regularizer())
                               for ngram_size in self.ngram_filter_sizes]
    self.projection_layer = Dense(self.output_dim)
    # Building all layers because these sub-layers are not explicitly
    # part of the computational graph.
    for convolution_layer in self.convolution_layers:
        with K.name_scope(convolution_layer.name):
            convolution_layer.build(input_shape)
    maxpool_output_dim = self.num_filters * len(self.ngram_filter_sizes)
    projection_input_shape = (input_shape[0], maxpool_output_dim)
    with K.name_scope(self.projection_layer.name):
        self.projection_layer.build(projection_input_shape)
    # Defining the weights of this "layer" as the set of weights from all
    # convolution and maxpooling layers.
    self.trainable_weights = []
    for layer in self.convolution_layers + [self.projection_layer]:
        self.trainable_weights.extend(layer.trainable_weights)
    super(CNNEncoder, self).build(input_shape)
Example #4
Source Project: WeSTClass | Author: yumeng5 | File: model.py | License: Apache License 2.0
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                  name="embedding", weights=[embedding_matrix],
                  trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier')
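Note that vocab_sz and embedding_matrix default to None but are required by the Embedding layer, and the returned model is not compiled, so the caller is expected to supply both and compile it. A hypothetical sketch, with every value an illustrative assumption:

import numpy as np

vocab_sz, emb_dim, seq_len = 10000, 100, 200
embedding_matrix = np.random.uniform(-0.1, 0.1, (vocab_sz, emb_dim))
model = ConvolutionLayer(input_shape=seq_len, n_classes=5,
                         vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
                         word_embedding_dim=emb_dim)
model.compile(optimizer='adam', loss='categorical_crossentropy')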
Example #5
Source Project: WeSHClass | Author: yumeng5 | File: models.py | License: Apache License 2.0
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y)
Example #6
Source Project: robotreviewer | Author: ijmarshall | File: rct_robot.py | License: GNU General Public License v3.0
def __init__(self):
    from keras.preprocessing import sequence
    from keras.models import load_model
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    from keras.layers import Embedding
    from keras.layers import Convolution1D, MaxPooling1D
    from keras import backend as K
    from keras.models import Model
    from keras.regularizers import l2

    global sequence, load_model, Sequential, Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    global Embedding, Convolution1D, MaxPooling1D, K, Model, l2

    self.svm_clf = MiniClassifier(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_svm_weights.npz'))
    cnn_weight_files = glob.glob(os.path.join(robotreviewer.DATA_ROOT, 'rct/*.h5'))
    self.cnn_clfs = [load_model(cnn_weight_file) for cnn_weight_file in cnn_weight_files]
    self.svm_vectorizer = HashingVectorizer(binary=False, ngram_range=(1, 1), stop_words='english')
    self.cnn_vectorizer = KerasVectorizer(vocab_map_file=os.path.join(robotreviewer.DATA_ROOT, 'rct/cnn_vocab_map.pck'),
                                          stop_words='english')
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_model_calibration.json'), 'r') as f:
        self.constants = json.load(f)
    self.calibration_lr = {}
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_ptyp_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn_ptyp'] = pickle.load(f)
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn'] = pickle.load(f)
Example #7
Source Project: text-classifier | Author: shibing624 | File: deep_model.py | License: Apache License 2.0
def cnn_model(max_len=400, vocabulary_size=20000, embedding_dim=128, hidden_dim=128,
              num_filters=512, filter_sizes="3,4,5", num_classes=4, dropout=0.5):
    print("Creating text CNN Model...")
    # input tensor
    inputs = Input(shape=(max_len,), dtype='int32')
    # embedding
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    # convolution blocks
    if "," in filter_sizes:
        filter_sizes = filter_sizes.split(",")
    else:
        filter_sizes = [3, 4, 5]
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=int(sz),
                             strides=1,
                             padding='valid',
                             activation='relu')(embedding)
        conv = MaxPooling1D()(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    dropout_layer = Dropout(dropout)(conv_concate)
    output = Dense(hidden_dim, activation='relu')(dropout_layer)
    output = Dense(num_classes, activation='softmax')(output)
    # model
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
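Since the function compiles the model itself, a smoke test only needs integer-encoded input. A hypothetical run on random data (all values illustrative, not from the original project):

import numpy as np

model = cnn_model(max_len=400, vocabulary_size=20000, num_classes=4)
X = np.random.randint(0, 20000, size=(8, 400))  # 8 integer-encoded texts
y = np.eye(4)[np.random.randint(0, 4, size=8)]  # one-hot labels
model.fit(X, y, batch_size=4, epochs=1)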
Example #8
Source Project: Keras-TextClassification | Author: yongzhuo | File: graph_zhang.py | License: MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding_output)  # (None, 50, 30, 1)
    # cnn + pool
    for char_cnn_size in self.char_cnn_layers:
        x = Convolution1D(filters=char_cnn_size[0],
                          kernel_size=char_cnn_size[1])(x)
        x = ThresholdedReLU(self.threshold)(x)
        if char_cnn_size[2] != -1:
            x = MaxPooling1D(pool_size=char_cnn_size[2], strides=1)(x)
    x = Flatten()(x)
    # fully connected layers
    for full in self.full_connect_layers:
        x = Dense(units=full)(x)
        x = ThresholdedReLU(self.threshold)(x)
    x = Dropout(self.dropout)(x)
    output = Dense(units=self.label, activation=self.activate_classify)(x)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Example #9
Source Project: coremltools | Author: apple | File: test_keras.py | License: BSD 3-Clause "New" or "Revised" License
def test_conv1d_lstm(self):
    from keras.layers import Convolution1D, LSTM, Dense

    model = Sequential()
    # input_shape = (time_step, dimensions)
    model.add(Convolution1D(32, 3, border_mode="same", input_shape=(10, 8)))
    # conv1d output shape = (None, 10, 32)
    model.add(LSTM(24))
    model.add(Dense(1, activation="sigmoid"))
    print("model.layers[1].output_shape=", model.layers[1].output_shape)

    input_names = ["input"]
    output_names = ["output"]
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)
    self.assertTrue(spec.HasField("neuralNetwork"))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names))
    six.assertCountEqual(self, input_names, [x.name for x in spec.description.input])
    self.assertEquals(len(spec.description.output), len(output_names))
    six.assertCountEqual(self, output_names, [x.name for x in spec.description.output])

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].convolution)
    self.assertIsNotNone(layers[1].simpleRecurrent)
    self.assertIsNotNone(layers[2].innerProduct)
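The test depends on "same" padding preserving all 10 time steps, so the LSTM receives a (None, 10, 32) tensor. A minimal sketch of that shape check, written with the Keras 2.x spelling padding="same":

from keras.models import Sequential
from keras.layers import Convolution1D

m = Sequential()
m.add(Convolution1D(32, 3, padding="same", input_shape=(10, 8)))
print(m.output_shape)  # (None, 10, 32): steps kept, channels 8 -> 32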
Example #10
Source Project: DeepConv-DTI | Author: GIST-CSBL | File: DeepConvDTI.py | License: GNU General Public License v3.0
def PLayer(self, size, filters, activation, initializer, regularizer_param):
    def f(input):
        # model_p = Convolution1D(filters=filters, kernel_size=size, padding='valid',
        #                         activity_regularizer=l2(regularizer_param),
        #                         kernel_initializer=initializer,
        #                         kernel_regularizer=l2(regularizer_param))(input)
        model_p = Convolution1D(filters=filters, kernel_size=size, padding='same',
                                kernel_initializer=initializer,
                                kernel_regularizer=l2(regularizer_param))(input)
        model_p = BatchNormalization()(model_p)
        model_p = Activation(activation)(model_p)
        return GlobalMaxPooling1D()(model_p)
    return f
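PLayer returns a closure, so the whole convolution/pooling pipeline can be applied like a single layer. A hypothetical standalone sketch of the same pattern (names, shapes, and hyperparameters are assumptions, not from DeepConvDTI):

from keras.layers import (Input, Convolution1D, BatchNormalization,
                          Activation, GlobalMaxPooling1D, Dense)
from keras.models import Model
from keras.regularizers import l2

def conv_pool_block(size, filters):
    # standalone sketch of the PLayer pattern: conv -> batch norm -> relu -> global max pool
    def f(x):
        x = Convolution1D(filters=filters, kernel_size=size, padding='same',
                          kernel_regularizer=l2(0.001))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return GlobalMaxPooling1D()(x)
    return f

seq_in = Input(shape=(2500, 20))            # e.g. an encoded protein sequence
features = conv_pool_block(8, 64)(seq_in)   # closure applied like a layer -> (None, 64)
model = Model(seq_in, Dense(1, activation='sigmoid')(features))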
Example #11
Source Project: semeval2017-scienceie | Author: UKPLab | File: convNet.py | License: Apache License 2.0
def build_cnn_char_complex(input_dim, output_dim, nb_filter):
    randomEmbeddingLayer = Embedding(input_dim, 32, input_length=maxlen, dropout=0.1)
    poolingLayer = Lambda(max_1d, output_shape=(nb_filter,))
    conv_filters = []
    for n_gram in range(2, 4):
        ngramModel = Sequential()
        ngramModel.add(randomEmbeddingLayer)
        ngramModel.add(Convolution1D(nb_filter=nb_filter,
                                     filter_length=n_gram,
                                     border_mode="valid",
                                     activation="relu",
                                     subsample_length=1))
        ngramModel.add(poolingLayer)
        conv_filters.append(ngramModel)

    clf = Sequential()
    clf.add(Merge(conv_filters, mode="concat"))
    clf.add(Activation("relu"))
    clf.add(Dense(100))
    clf.add(Dropout(0.1))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
Example #12
Source Project: semeval2017-scienceie | Author: UKPLab | File: blstm.py | License: Apache License 2.0
def build_lstm(output_dim, embeddings):
    loss_function = "categorical_crossentropy"

    # this is the placeholder tensor for the input sequences
    sequence = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")
    # this embedding layer will transform the sequences of integers
    embedded = Embedding(embeddings.shape[0], embeddings.shape[1],
                         input_length=MAX_SEQUENCE_LENGTH,
                         weights=[embeddings], trainable=True)(sequence)

    # 4 convolution layers (each 1000 filters)
    cnn = [Convolution1D(filter_length=filters, nb_filter=1000, border_mode="same")
           for filters in [2, 3, 5, 7]]
    # concatenate
    merged_cnn = merge([cnn(embedded) for cnn in cnn], mode="concat")
    # create attention vector from max-pooled convoluted
    maxpool = Lambda(lambda x: keras_backend.max(x, axis=1, keepdims=False),
                     output_shape=lambda x: (x[0], x[2]))
    attention_vector = maxpool(merged_cnn)

    forwards = AttentionLSTM(64, attention_vector)(embedded)
    backwards = AttentionLSTM(64, attention_vector, go_backwards=True)(embedded)

    # concatenate the outputs of the 2 LSTM layers
    bi_lstm = merge([forwards, backwards], mode="concat", concat_axis=-1)
    after_dropout = Dropout(0.5)(bi_lstm)

    # softmax output layer
    output = Dense(output_dim=output_dim, activation="softmax")(after_dropout)

    # the complete model
    model = Model(input=sequence, output=output)

    # try using different optimizers and different optimizer configs
    model.compile("adagrad", loss_function, metrics=["accuracy"])

    return model
Example #13
Source Project: tartarus | Author: sergiooramas | File: models.py | License: MIT License
def get_model_4(params):
    embedding_weights = pickle.load(open(common.TRAINDATA_DIR +
                                         "/embedding_weights_w2v_%s.pk" % params['embeddings_suffix'], "rb"))
    graph_in = Input(shape=(params['sequence_length'], params['embedding_dim']))
    convs = []
    for fsz in params['filter_sizes']:
        conv = Convolution1D(nb_filter=params['num_filters'],
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)
        x = conv(graph_in)
        logging.debug("Filter size: %s" % fsz)
        logging.debug("Output CNN: %s" % str(conv.output_shape))
        pool = GlobalMaxPooling1D()
        x = pool(x)
        logging.debug("Output Pooling: %s" % str(pool.output_shape))
        convs.append(x)

    if len(params['filter_sizes']) > 1:
        merge = Merge(mode='concat')
        out = merge(convs)
        logging.debug("Merge: %s" % str(merge.output_shape))
    else:
        out = convs[0]
    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if not params['model_variation'] == 'CNN-static':
        model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                            input_length=params['sequence_length'],
                            weights=embedding_weights))
    model.add(Dropout(params['dropout_prob'][0],
                      input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(graph)
    model.add(Dense(params['n_dense']))
    model.add(Dropout(params['dropout_prob'][1]))
    model.add(Activation('relu'))
    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))

    return model

# word2vec ARCH with LSTM
Example #14
Source Project: deep-mlsa | Author: spinningbytes | File: default_cnn.py | License: Apache License 2.0
def create_default_model(config_data):
    nb_filter = 200
    filter_length = 6
    hidden_dims = nb_filter

    embedding_matrix = load_embedding_matrix(config_data)
    max_features = embedding_matrix.shape[0]
    embedding_dims = embedding_matrix.shape[1]
    max_len = config_data['max_sentence_length']

    logging.info('Build Model...')
    logging.info('Embedding Dimensions: ({},{})'.format(max_features, embedding_dims))

    main_input = Input(batch_shape=(None, max_len), dtype='int32', name='main_input')

    if not config_data.get('random_embedding', None):
        logging.info('Pretrained Word Embeddings')
        embeddings = Embedding(
            max_features,
            embedding_dims,
            input_length=max_len,
            weights=[embedding_matrix],
            trainable=False
        )(main_input)
    else:
        logging.info('Random Word Embeddings')
        embeddings = Embedding(max_features,
                               embedding_dims,
                               init='lecun_uniform',
                               input_length=max_len)(main_input)

    zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)

    conv1 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(zeropadding)
    max_pooling1 = MaxPooling1D(pool_length=4, stride=2)(conv1)

    conv2 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(max_pooling1)
    max_pooling2 = MaxPooling1D(pool_length=conv2._keras_shape[1])(conv2)

    flatten = Flatten()(max_pooling2)
    hidden = Dense(hidden_dims)(flatten)
    softmax_layer1 = Dense(3, activation='softmax', name='sentiment_softmax',
                           init='lecun_uniform')(hidden)

    model = Model(input=[main_input], output=softmax_layer1)
    test_model = Model(input=[main_input], output=[softmax_layer1, hidden])

    return model, test_model
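One detail worth tracing: ZeroPadding1D(filter_length - 1) pads 5 zeros on each side, so the first "valid" convolution produces more time steps than the input (a wide convolution). A rough shape walk-through, assuming max_len = 140 (the value comes from config_data and is an assumption here):

# assuming max_len = 140, filter_length = 6, nb_filter = 200
# embeddings:    (None, 140, embedding_dims)
# zeropadding:   (None, 150, embedding_dims)  # 5 zeros on each side
# conv1:         (None, 145, 200)             # 150 - 6 + 1
# max_pooling1:  (None, 71, 200)              # pool 4, stride 2: (145 - 4)//2 + 1
# conv2:         (None, 66, 200)              # 71 - 6 + 1
# max_pooling2:  (None, 1, 200)               # pools over all remaining steps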
Example #15
Source Project: SSAN-self-attention-sentiment-analysis-classification | Author: Artaches | File: cnn.py | License: Apache License 2.0
def create_cnn(W, max_length, dim=300, dropout=.5, output_dim=8):
    # Convolutional model
    filter_sizes = (2, 3, 4)
    num_filters = 3

    graph_in = Input(shape=(max_length, len(W[0])))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)(graph_in)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    out = Merge(mode='concat')(convs)
    graph = Model(input=graph_in, output=out)

    # Full model
    model = Sequential()
    model.add(Embedding(output_dim=W.shape[1], input_dim=W.shape[0],
                        input_length=max_length, weights=[W], trainable=True))
    model.add(Dropout(dropout))
    model.add(graph)
    model.add(Dense(dim, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    return model
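A hypothetical call, with W standing in for a real pretrained embedding matrix (the random values here are purely illustrative):

import numpy as np

W = np.random.uniform(-0.25, 0.25, (5000, 300)).astype('float32')  # stand-in word vectors
model = create_cnn(W, max_length=60, output_dim=8)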
Example #16
Source Project: semeval2017-scienceie | Author: UKPLab | File: convNet.py | License: Apache License 2.0
def build_cnn_char_threeModels(input_dim, output_dim, nb_filter, filter_size=3):
    left = Sequential()
    left.add(Embedding(input_dim,
                       32,  # character embedding size
                       input_length=L,
                       dropout=0.2))
    left.add(Convolution1D(nb_filter=nb_filter,
                           filter_length=filter_size,
                           border_mode="valid",
                           activation="relu",
                           subsample_length=1))
    left.add(GlobalMaxPooling1D())
    left.add(Dense(100))
    left.add(Dropout(0.2))
    left.add(Activation("tanh"))

    center = Sequential()
    center.add(Embedding(input_dim,
                         32,  # character embedding size
                         input_length=M,
                         dropout=0.2))
    center.add(Convolution1D(nb_filter=nb_filter,
                             filter_length=filter_size,
                             border_mode="valid",
                             activation="relu",
                             subsample_length=1))
    center.add(GlobalMaxPooling1D())
    center.add(Dense(100))
    center.add(Dropout(0.2))
    center.add(Activation("tanh"))

    right = Sequential()
    right.add(Embedding(input_dim,
                        32,  # character embedding size
                        input_length=R,
                        dropout=0.2))
    right.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_size,
                            border_mode="valid",
                            activation="relu",
                            subsample_length=1))
    right.add(GlobalMaxPooling1D())
    right.add(Dense(100))
    right.add(Dropout(0.2))
    right.add(Activation("tanh"))

    clf = Sequential()
    clf.add(Merge([left, center, right], mode="concat"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
Example #17
Source Project: Benchmarks | Author: ECP-CANDLE | File: keras_mt_shared_cnn.py | License: MIT License
def init_export_network(num_classes, in_seq_len, vocab_size, wv_space,
                        filter_sizes, num_filters, concat_dropout_prob,
                        emb_l2, w_l2, optimizer):

    # define network layers ----------------------------------------------------
    input_shape = tuple([in_seq_len])
    model_input = Input(shape=input_shape, name="Input")

    # embedding lookup
    emb_lookup = Embedding(vocab_size,
                           wv_space,
                           input_length=in_seq_len,
                           name="embedding",
                           # embeddings_initializer=RandomUniform,
                           embeddings_regularizer=l2(emb_l2))(model_input)

    # convolutional layer and dropout
    conv_blocks = []
    for ith_filter, sz in enumerate(filter_sizes):
        conv = Convolution1D(filters=num_filters[ith_filter],
                             kernel_size=sz,
                             padding="same",
                             activation="relu",
                             strides=1,
                             # kernel_initializer='lecun_uniform',
                             name=str(ith_filter) + "_thfilter")(emb_lookup)
        conv_blocks.append(GlobalMaxPooling1D()(conv))
    concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    concat_drop = Dropout(concat_dropout_prob)(concat)

    # different dense layer per task
    FC_models = []
    for i in range(len(num_classes)):
        outlayer = Dense(num_classes[i], name="Dense" + str(i),
                         activation='softmax')(concat_drop)  # , kernel_regularizer=l2(0.01))(concat_drop)
        FC_models.append(outlayer)

    # the multitask model
    model = Model(inputs=model_input, outputs=FC_models)
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["acc"])

    return model
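Because num_classes and num_filters are lists (one entry per task and per filter size, respectively), a hypothetical invocation might look like this, with every value an illustrative assumption:

model = init_export_network(num_classes=[4, 2, 3],  # three classification tasks
                            in_seq_len=1500,
                            vocab_size=40000,
                            wv_space=300,            # embedding dimension
                            filter_sizes=[3, 4, 5],
                            num_filters=[100, 100, 100],
                            concat_dropout_prob=0.5,
                            emb_l2=0.001, w_l2=0.01,
                            optimizer='adam')
# one shared convolutional text encoder feeding three softmax heads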
Example #18
Source Project: active-qa | Author: google | File: selector_keras.py | License: Apache License 2.0
def _build_model(self, embedding_matrix):
    """Builds the model.

    Args:
      embedding_matrix: A float32 array of shape [vocab_size, embedding_dim].

    Returns:
      The model.
    """
    max_feature_length = FLAGS.max_sequence_length

    model_inputs = []
    encoder_outputs = []
    for _ in range(3):
        model_input = Input(shape=(max_feature_length,))
        model_inputs.append(model_input)
        embed = Embedding(
            output_dim=100,
            input_dim=len(embedding_matrix),
            input_length=max_feature_length,
            weights=[embedding_matrix],
            trainable=False)(model_input)
        conv = Convolution1D(
            filters=100,
            kernel_size=3,
            padding='valid',
            activation='relu',
            strides=1)(embed)
        conv = Dropout(0.4)(conv)
        conv = GlobalMaxPooling1D()(conv)
        encoder_outputs.append(conv)

    merge = Concatenate()(encoder_outputs)
    model_output = Dense(1, activation='sigmoid')(merge)
    model = Model(model_inputs, model_output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    logging.info('Model successfully built. Summary: %s', model.summary())

    return model