Python keras.layers.CuDNNLSTM() Examples
The following are 10 code examples of keras.layers.CuDNNLSTM(), taken from open-source projects. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the module keras.layers.
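CuDNNLSTM is a fast, GPU-only LSTM implementation backed by NVIDIA's cuDNN library, available in Keras 2.x with the TensorFlow backend. It supports only the stock LSTM configuration (fixed tanh activation, no dropout or recurrent_dropout arguments), which is why several examples below apply Dropout as a separate layer. As a quick orientation before the examples, here is a minimal sketch; the shapes and unit counts are illustrative, not taken from any example below.

from keras.models import Sequential
from keras.layers import CuDNNLSTM, Dense

# Minimal binary sequence classifier: 100 timesteps, 16 features each.
# CuDNNLSTM needs a GPU and the TensorFlow backend; on a CPU-only
# machine, keras.layers.LSTM is a drop-in replacement here.
model = Sequential([
    CuDNNLSTM(32, input_shape=(100, 16)),  # activation is fixed to tanh
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')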
Example #1
Source File: models.py From neurowriter with MIT License
def create(inputtokens, vocabsize, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')
    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)
    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units))(net)
    net = Dropout(dropout)(net)
    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)
    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)
    return model
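The model returned by create() is not compiled. A hypothetical usage sketch follows; the vocabulary size, sequence length, and hyperparameters are made up for illustration:

# Hypothetical call; create() returns an uncompiled model.
model = create(inputtokens=64, vocabsize=5000, units=128, dropout=0.2)
model.compile(optimizer='adam', loss='categorical_crossentropy')
# X: (batch, 64) integer token ids
# y: (batch, 5000) one-hot next-token targets
# model.fit(X, y, batch_size=128, epochs=10)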
Example #2
Source File: CPSC_model.py From CPSC_Scheme with MIT License
def nnet(inputs, keep_prob, num_classes):
    """
    Deep network model for a single ECG lead.
    :param inputs: keras tensor, the sliced and stacked single-lead signal.
    :param keep_prob: float, dropout rate for randomly masking segments.
    :param num_classes: int, number of target classes.
    :return: keras tensors, the per-class probabilities and the features
             extracted automatically before the fully connected layer.
    """
    branches = []
    for i in range(int(inputs.shape[-1])):
        ld = Lambda(Net.__slice, output_shape=(int(inputs.shape[1]), 1),
                    arguments={'index': i})(inputs)
        ld = Reshape((int(inputs.shape[1]), 1))(ld)
        bch = Net.__backbone(ld)
        branches.append(bch)
    features = Concatenate(axis=1)(branches)
    features = Dropout(keep_prob, [1, int(inputs.shape[-1]), 1])(features)
    features = Bidirectional(CuDNNLSTM(1, return_sequences=True),
                             merge_mode='concat')(features)
    features = Flatten()(features)
    net = Dense(units=num_classes, activation='softmax')(features)
    return net, features
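Net.__slice and Net.__backbone are private helpers that this page does not show. A slice helper used this way typically just indexes one lead out of the channel axis; the following is a hypothetical reconstruction, only to make the Lambda call above concrete:

@staticmethod
def __slice(x, index):
    # Hypothetical: select a single lead from (batch, time, leads).
    # The Reshape in nnet() then restores the trailing axis of size 1.
    return x[:, :, index]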
Example #3
Source File: main.py From ai-platform with MIT License
def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input',
                      batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)
    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Load existing model
Example #4
Source File: train_weathernet.py From ai-platform with MIT License
def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input',
                      batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)
    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Predict
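Both weathernet examples pin the batch size with batch_shape=(1, lookback_window, 1), so the model must be fed one window per batch. A hypothetical call sequence (lookback_window and LSTMunits are module-level globals in the source; the values here are invented):

lookback_window, LSTMunits = 72, 128  # hypothetical values for the globals
weathernet = build_and_compile_model()
# X: (n_windows, lookback_window, 1) inputs, y: (n_windows, 1) targets
# batch_size must be 1 to match the fixed batch_shape above
# weathernet.fit(X, y, batch_size=1, epochs=5, shuffle=False)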
Example #5
Source File: graph.py From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding)
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # Bi-LSTM
    # NOTE: CuDNNLSTM/CuDNNGRU accept no activation argument (tanh is
    # fixed), so the CuDNN branches above would fail with activation='relu'.
    for nrl in range(self.num_rnn_layers):
        x = Bidirectional(layer_cell(units=self.rnn_units,
                                     return_sequences=True,
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                     recurrent_regularizer=regularizers.l2(0.32)
                                     ))(x)
        x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
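The if/elif chain that maps a type string to a layer class recurs in this repo's graphs (see also Examples #9 and #10). A dictionary lookup is an equivalent, more compact sketch of the same dispatch, with GRU as the fallback:

# Equivalent sketch of the string-to-class dispatch above.
RNN_CELLS = {"LSTM": LSTM, "GRU": GRU,
             "CuDNNLSTM": CuDNNLSTM, "CuDNNGRU": CuDNNGRU}
layer_cell = RNN_CELLS.get(self.rnn_type, GRU)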
Example #6
Source File: models.py From delft with Apache License 2.0
def lstm(maxlen, embed_size, recurrent_units, dropout_rate,
         recurrent_dropout_rate, dense_size, nb_classes):
    # inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #               trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=dropout_rate)(input_layer)
    # x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    # x_c = AttentionWeightedAverage()(x)
    # x_a = MaxPooling1D(pool_size=2)(x)
    # x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

# bidirectional LSTM
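The commented-out CuDNNLSTM line marks the GPU alternative. Because CuDNNLSTM accepts no dropout or recurrent_dropout arguments, the swap has to move dropout outside the cell (and gives up recurrent dropout, which has no CuDNN equivalent). A sketch of just that substitution:

# GPU variant of the recurrent block above: dropout becomes a separate
# layer because CuDNNLSTM takes no dropout/recurrent_dropout arguments.
x = CuDNNLSTM(recurrent_units, return_sequences=True)(input_layer)
x = Dropout(dropout_rate)(x)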
Example #7
Source File: models.py From neurowriter with MIT License
def create(inputtokens, vocabsize, layers=1, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')
    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)
    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units, return_sequences=(layers > 1)))(net)
    net = Dropout(dropout)(net)
    # Rest of LSTM layers with residual connections (if any)
    for i in range(1, layers):
        if i < layers - 1:
            # 2*units matches the bidirectional layer's concatenated output
            # width, so the residual add() below has compatible shapes
            block = BatchNormalization()(net)
            block = CuDNNLSTM(2 * units, return_sequences=True)(block)
            block = Dropout(dropout)(block)
            net = add([block, net])
        else:
            net = BatchNormalization()(net)
            net = CuDNNLSTM(2 * units)(net)
            net = Dropout(dropout)(net)
    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)
    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)
    return model
Example #8
Source File: models.py From neurowriter with MIT License
def create(inputtokens, vocabsize, convlayers=3, kernels=512, kernelsize=5,
           convdropout=0.5, lstmunits=256, lstmdropout=0.1, embedding=512,
           embdropout=0.5):
    input_ = Input(shape=(inputtokens,), dtype='int32')
    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(embdropout)(net)
    # Convolutional layers (if any)
    for layer in range(convlayers):
        net = Conv1D(kernels, kernelsize, padding='same')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        net = Dropout(convdropout)(net)
    # Bidirectional LSTM layer
    net = Bidirectional(CuDNNLSTM(lstmunits))(net)
    net = Dropout(lstmdropout)(net)
    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)
    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)
    return model
Example #9
Source File: graph.py From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(x)
    # NOTE: this compares self.rnn_units (a unit count elsewhere in the repo)
    # to type strings; self.rnn_type is likely what was intended
    if self.rnn_units == "LSTM":
        layer_cell = LSTM
    elif self.rnn_units == "GRU":
        layer_cell = GRU
    elif self.rnn_units == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_units == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # CNN
    convs = []
    for kernel_size in self.filters:
        conv = Conv1D(self.filters_num,
                      kernel_size=kernel_size,
                      strides=1,
                      padding='SAME',
                      kernel_regularizer=regularizers.l2(self.l2),
                      bias_regularizer=regularizers.l2(self.l2),
                      )(embedding_output_spatial)
        convs.append(conv)
    x = Concatenate(axis=1)(convs)
    # Bi-LSTM (the original paper uses LSTM)
    x = Bidirectional(layer_cell(units=self.rnn_units,
                                 return_sequences=True,
                                 activation='relu',
                                 kernel_regularizer=regularizers.l2(self.l2),
                                 recurrent_regularizer=regularizers.l2(self.l2)
                                 ))(x)
    x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Example #10
Source File: graph.py From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
    """
    Build the neural network, a bit like RCNN, R
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = Activation('tanh')(x)
    # entire embedding channels are dropped out instead of the normal Keras
    # embedding dropout, which drops all channels for entire words; many of
    # the datasets contain so few words that losing one or more words can
    # alter the emotions completely
    x = SpatialDropout1D(self.dropout_spatial)(x)
    # NOTE: as in the previous example, self.rnn_units is compared to type
    # strings; self.rnn_type is likely intended
    if self.rnn_units == "LSTM":
        layer_cell = LSTM
    elif self.rnn_units == "GRU":
        layer_cell = GRU
    elif self.rnn_units == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_units == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # skip-connection from embedding to output eases gradient flow and allows
    # access to lower-level features; the ordering of the merge is important
    # for consistency with the pretrained model
    lstm_0_output = Bidirectional(layer_cell(units=self.rnn_units,
                                             return_sequences=True,
                                             activation='relu',
                                             kernel_regularizer=regularizers.l2(self.l2),
                                             recurrent_regularizer=regularizers.l2(self.l2)
                                             ), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(layer_cell(units=self.rnn_units,
                                             return_sequences=True,
                                             activation='relu',
                                             kernel_regularizer=regularizers.l2(self.l2),
                                             recurrent_regularizer=regularizers.l2(self.l2)
                                             ), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])
    # if return_attention is True in AttentionWeightedAverage, an additional
    # tensor representing the weight at each timestep is returned
    weights = None
    x = AttentionWeightedAverage(name='attlayer',
                                 return_attention=self.return_attention)(x)
    if self.return_attention:
        x, weights = x
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)