Python keras.layers.LSTM Examples

The following are 30 code examples of keras.layers.LSTM(), drawn from open-source projects. Each example lists its source file, project, and license. You may also want to check out all available functions and classes of the keras.layers module.
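Before the project examples, here is a minimal, self-contained sketch of the most common keras.layers.LSTM() usage (many-to-one sequence classification). The layer sizes, sequence length, and dummy data are illustrative and do not come from any project listed below.

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

# Hypothetical shapes: sequences of 32 timesteps with 8 features each, 3 output classes.
model = Sequential()
model.add(LSTM(64, input_shape=(32, 8)))        # returns only the final hidden state
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

x = np.random.random((16, 32, 8))               # dummy batch of 16 sequences
y = np.eye(3)[np.random.randint(3, size=16)]    # dummy one-hot labels
model.fit(x, y, epochs=1, batch_size=4)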
Example #1
Source File: recurrent.py    From keras-anomaly-detection with MIT License
def create_model(time_window_size, metric):
        model = Sequential()

        model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                         input_shape=(time_window_size, 1)))
        model.add(MaxPooling1D(pool_size=4))

        model.add(LSTM(64))

        model.add(Dense(units=time_window_size, activation='linear'))

        model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])

        # model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        # model.compile(optimizer="sgd", loss="mse", metrics=[metric])

        print(model.summary())
        return model 
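A hedged usage sketch for create_model() above; the window size, metric name, and training call are assumptions for illustration, not values from the keras-anomaly-detection project.

# Illustrative only: time_window_size and metric are assumed values.
model = create_model(time_window_size=64, metric='mean_absolute_error')
# The network takes windows of shape (batch, 64, 1) and reconstructs each window,
# so it can be trained autoencoder-style for anomaly detection, e.g.:
# model.fit(windows, windows.reshape(-1, 64), epochs=20, batch_size=32)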
Example #2
Source File: model.py    From Image-Caption-Generator with MIT License
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to the same length; the mask makes downstream layers ignore those padded timesteps (an efficiency measure).
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
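A hedged sketch of how RNNModel() above might be called; the rnnConfig values, vocabulary size, and caption length are illustrative, not the project's actual configuration.

# Hypothetical configuration; the keys follow RNNModel above, the values are guesses.
rnnConfig = {'embedding_size': 256, 'dropout': 0.5, 'LSTM_units': 256, 'dense_units': 256}
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='inceptionv3')
# Each training sample pairs a 2048-d image feature vector and a padded caption prefix
# of length 34 with a one-hot encoding of the next word, matching categorical_crossentropy.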
Example #3
Source File: model.py    From CCKS2019-Chinese-Clinical-NER with MIT License
def __build_model(self):
        model = Sequential()

        embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                    output_dim=self.embedding_dim,
                                    weights=[self.embedding_mat],
                                    trainable=False)
        model.add(embedding_layer)

        bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
        model.add(bilstm_layer)

        model.add(TimeDistributed(Dense(256, activation="relu")))

        crf_layer = CRF(units=len(self.tags), sparse_target=True)
        model.add(crf_layer)

        model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
        model.summary()

        return model 
Example #4
Source File: model_keras.py    From Text_Generate with MIT License
def model_keras(num_words=3000, num_units=128):
    '''
    Build the RNN model.
    :param num_words: vocabulary size
    :param num_units: word-embedding dimension; the number of LSTM units defaults to the same value
    :return:
    '''
    data_input = Input(shape=[None])
    embedding = Embedding(input_dim=num_words, output_dim=num_units, mask_zero=True)(data_input)
    lstm = LSTM(units=num_units, return_sequences=True)(embedding)
    x = LSTM(units=num_units, return_sequences=True)(lstm)
    # Keras does not seem to support reshaping y inside the model the way TensorFlow does
    # x = Reshape(target_shape=[-1, num_units])(x)
    outputs = Dense(units=num_words, activation='softmax')(x)

    model = Model(inputs=data_input, outputs=outputs)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.adam(lr=0.01),
                  metrics=['accuracy'])
    return model 
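A hedged usage note for model_keras() above; the input/target layout is an assumption consistent with the sparse_categorical_crossentropy loss, not the project's actual training code.

model = model_keras(num_words=3000, num_units=128)
model.summary()
# x: zero-padded sequences of word ids, shape (batch, T)
# y: the same sequences shifted left by one step, shape (batch, T, 1),
#    so the network learns to predict the next token at every position.
# model.fit(x, y, batch_size=64, epochs=10)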
Example #5
Source File: baseline.py    From MELD with GNU General Public License v3.0
def get_audio_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 50

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Example #6
Source File: baseline.py    From MELD with GNU General Public License v3.0
def get_bimodal_model(self):

		# Modality specific hyperparameters
		self.epochs = 100
		self.batch_size = 10

		# Modality specific parameters
		self.embedding_dim = self.train_x.shape[2]

		print("Creating Model...")
		
		inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
		masked = Masking(mask_value=0)(inputs)
		lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
		output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

		model = Model(inputs, output)
		return model 
Example #7
Source File: RNN.py    From navbot with MIT License
def _build(self):
        # the model that will be trained
        rnn_x = Input(shape=(None, Z_DIM + ACTION_DIM))
        lstm = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)

        lstm_output, _, _ = lstm(rnn_x)
        mdn = Dense(Z_DIM)(lstm_output)

        rnn = Model(rnn_x, mdn)

        # the model used during prediction
        state_input_h = Input(shape=(HIDDEN_UNITS,))
        state_input_c = Input(shape=(HIDDEN_UNITS,))
        state_inputs = [state_input_h, state_input_c]
        
        _, state_h, state_c = lstm(rnn_x, initial_state=state_inputs)
        forward = Model([rnn_x] + state_inputs, [state_h, state_c])

        optimizer = Adam(lr=0.0001)
        # optimizer = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
        rnn.compile(loss='mean_squared_error', optimizer=optimizer)

        return [rnn, forward] 
Example #8
Source File: mom_example.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #9
Source File: RNN.py    From malware-prediction-rnn with Apache License 2.0
def __middle_hidden_layer(self, return_sequences):

		if self.current_params["layer_type"]  == "GRU":
			layer = GRU(self.current_params["hidden_neurons"], 
				return_sequences=return_sequences, 
				kernel_initializer=self.current_params["kernel_initializer"], 
				recurrent_initializer=self.current_params["recurrent_initializer"], 
				recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]), 
				bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
				dropout=self.current_params["dropout"], 
				recurrent_dropout=self.current_params["recurrent_dropout"]
			)
		else:
			layer = LSTM(self.current_params["hidden_neurons"], 
				return_sequences=return_sequences, 
				kernel_initializer=self.current_params["kernel_initializer"], 
				recurrent_initializer=self.current_params["recurrent_initializer"], 
				recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]), 
				bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
				dropout=self.current_params["dropout"], 
				recurrent_dropout=self.current_params["recurrent_dropout"]
			)

		return layer 
Example #10
Source File: cnn_rnn_crf.py    From Jtyoui with MIT License
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model 
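A hedged note on the data layout expected by create_model() above; the shapes follow from sparse_target=True on the CRF layer and are not copied from the project.

# Assumes `length`, `vocab` and `chunk_tags` are defined at module level, as in the source file.
model = create_model()
# x: padded word-id sequences, shape (batch, length)
# y: integer tag ids, shape (batch, length, 1), as required by CRF(sparse_target=True)
# model.fit(x, y, batch_size=16, epochs=5)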
Example #11
Source File: NER.py    From Jtyoui with MIT License
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Example #12
Source File: lstm.py    From Classical-Piano-Composer with MIT License
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model 
Example #13
Source File: models.py    From tartarus with MIT License
def get_model_41(params):
    embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk","rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'], input_length=params['sequence_length'],
                        weights=embedding_weights))
    #model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    #model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))

    return model


# CRNN Arch for audio 
Example #14
Source File: predict.py    From Classical-Piano-Composer with MIT License
def prepare_sequences(notes, pitchnames, n_vocab):
    """ Prepare the sequences used by the Neural Network """
    # map between notes and integers and back
    note_to_int = dict((note, number) for number, note in enumerate(pitchnames))

    sequence_length = 100
    network_input = []
    output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        output.append(note_to_int[sequence_out])

    n_patterns = len(network_input)

    # reshape the input into a format compatible with LSTM layers
    normalized_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input
    normalized_input = normalized_input / float(n_vocab)

    return (network_input, normalized_input) 
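A hedged note on the two return values of prepare_sequences(); notes and pitchnames come from the project's data-loading step, which is not shown here.

# Assumes `notes` and `pitchnames` were loaded elsewhere in the project.
network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab=len(pitchnames))
# network_input: list of integer windows (length-100 note sequences), kept for decoding output
# normalized_input: array of shape (n_patterns, 100, 1) scaled to [0, 1),
#                   the form expected by the LSTM stack in create_network() below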
Example #15
Source File: predict.py    From Classical-Piano-Composer with MIT License
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    model.load_weights('weights.hdf5')

    return model 
Example #16
Source File: structure.py    From armchair-expert with MIT License
def __init__(self, use_gpu: bool = False):
        import tensorflow as tf
        from keras.models import Sequential
        from keras.layers import Dense, Embedding
        from keras.layers import LSTM
        from keras.backend import set_session

        latent_dim = StructureModel.SEQUENCE_LENGTH * 8

        model = Sequential()
        model.add(
            Embedding(StructureFeatureAnalyzer.NUM_FEATURES, StructureFeatureAnalyzer.NUM_FEATURES,
                      input_length=StructureModel.SEQUENCE_LENGTH))
        model.add(LSTM(latent_dim, dropout=0.2, return_sequences=False))
        model.add(Dense(StructureFeatureAnalyzer.NUM_FEATURES, activation='softmax'))
        model.summary()
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
        self.model = model

        if use_gpu:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            set_session(tf.Session(config=config)) 
Example #17
Source File: restaurant_example.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # size of hidden layer in LSTM
        # Build Model
        batch_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #18
Source File: keras_policy.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # Neural Net and training params
        batch_shape = (None, max_history_len, num_features)
        # Build Model
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, units=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example #19
Source File: model.py    From ancient-Chinese-poem-generator with MIT License
def get_training_data2(raw_dict, char_to_index):
	'''
	'Generate data for training LSTM from raw data without considering $
	'
	'raw_dict: 		original data. struct: {'title':"",'strains':'zzppz$ppzzp$...','paragraphs':"12345$67890$..."}
	'char_to_index: 	dictionary mapping char to index
	'
	'return:
	'	X [input chars sequence]
	'	Y [char label]
	'''
	
	data_X = []
	data_Y = []
	for poem in raw_dict:
		context = poem['paragraphs']
		context = context.replace('$', '')
		n_chars = len(context)
		for i in range(0,n_chars - seq_len - 1,1):
			s_out = context[i+seq_len - 1]
			s_in = context[i:i+seq_len - 1]
			data_X.append([char_to_index[c] for c in s_in])
			data_Y.append(char_to_index[s_out])
	return data_X,data_Y 
Example #20
Source File: deep_models.py    From iust_deep_fuzz with MIT License
def model_6(input_dim, output_dim):
    model = Sequential()
    model.add(LSTM(128, input_shape=input_dim, return_sequences=True, recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=False, recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_6'


# ------------------------------------------------------------------------

# Unidirectional LSTM (Many to One)
#
# Summary of results for this model:
# Try 3:
# batch_size=128, lr=0.001
# With step=1 and 128 neurons it was very bad. Setting step=3 and neuron size=256 helped.
# With the Adam optimizer, lr=0.001 and step=3, the best model appears after 61 epochs.
# Changing from RMSprop to Adam fixed the learning process.
# 
Example #21
Source File: models.py    From SeqGAN with MIT License
def GeneratorPretraining(V, E, H):
    '''
    Model for Generator pretraining. This model's weights should be shared with
        Generator.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Returns:
        generator_pretraining: keras Model
            input: word ids, shape = (B, T)
            output: word probability, shape = (B, T, V)
    '''
    # In the comments, B means batch size and T means the number of time steps.
    input = Input(shape=(None,), dtype='int32', name='Input') # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input) # (B, T, E)
    out = LSTM(H, return_sequences=True, name='LSTM')(out)  # (B, T, H)
    out = TimeDistributed(
        Dense(V, activation='softmax', name='DenseSoftmax'),
        name='TimeDenseSoftmax')(out)    # (B, T, V)
    generator_pretraining = Model(input, out)
    return generator_pretraining 
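A hedged sketch of compiling and feeding the pretraining model; the V/E/H values and the one-hot target layout are assumptions, not the SeqGAN repository's exact training script.

# Illustrative sizes; real values come from the project's configuration.
g_pre = GeneratorPretraining(V=5000, E=64, H=64)
g_pre.compile(loss='categorical_crossentropy', optimizer='adam')
# x: word ids, shape (B, T); y: one-hot next-word targets, shape (B, T, V)
# g_pre.fit(x, y, batch_size=32, epochs=1)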
Example #22
Source File: models.py    From SeqGAN with MIT License
def __init__(self, sess, B, V, E, H, lr=1e-3):
        '''
        # Arguments:
            B: int, Batch size
            V: int, Vocabulary size
            E: int, Embedding size
            H: int, LSTM hidden size
        # Optional Arguments:
            lr: float, learning rate, default is 0.001
        '''
        self.sess = sess
        self.B = B
        self.V = V
        self.E = E
        self.H = H
        self.lr = lr
        self._build_gragh()
        self.reset_rnn_state() 
Example #23
Source File: model.py    From ancient-Chinese-poem-generator with MIT License
def train(X,Y,file,load_path):
	# define model
	model = Sequential()
	model.add(LSTM(n_mmu, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
	model.add(Dropout(dropout))
	model.add(LSTM(n_mmu, return_sequences=True))
	model.add(Dropout(dropout))
	if n_layer == 3:
		model.add(LSTM(n_mmu))
		model.add(Dropout(dropout))
	model.add(Dense(Y.shape[1], activation='softmax'))
	model.compile(loss='categorical_crossentropy', optimizer='adam')

	model.save(file + "/model-{}-{}.h5".format(n_mmu,dropout))
	# define the checkpoint
	filepath=file + "/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
	checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
	callbacks_list = [checkpoint]
	# loading
	if load_path != "":
		model.load_weights(load_path)
	# training
	model.fit(X, Y, epochs=epoch, batch_size=batch, callbacks=callbacks_list, validation_split=0.1)
Example #24
Source File: models.py    From SeqGAN with MIT License
def Discriminator(V, E, H=64, dropout=0.1):
    '''
    Discriminator model.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
        dropout: float
    # Returns:
        discriminator: keras model
            input: word ids, shape = (B, T)
            output: probability of true data or not, shape = (B, 1)
    '''
    input = Input(shape=(None,), dtype='int32', name='Input')   # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H)(out)
    out = Highway(out, num_layers=1)
    out = Dropout(dropout, name='Dropout')(out)
    out = Dense(1, activation='sigmoid', name='FC')(out)

    discriminator = Model(input, out)
    return discriminator 
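A hedged usage sketch for the discriminator; the compile settings and label shape are assumptions consistent with the sigmoid output, not necessarily the repository's training code.

# Illustrative sizes; V and E are placeholders.
discriminator = Discriminator(V=5000, E=64, H=64, dropout=0.1)
discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# x: word ids of shape (B, T); y: 1 for real sequences, 0 for generated ones, shape (B, 1)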
Example #25
Source File: deep_models.py    From iust_deep_fuzz with MIT License
def model_2(input_dim, output_dim):
    """
    Total params: 259,168
    Trainable params: 259,168
    Non-trainable params: 0

    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    # model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.1))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=False, dropout=0.2, recurrent_dropout=0.1))
    # model.add(LSTM(128, activation='relu', dropout=0.2))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_2'


# Summary of results for this model:
Example #26
Source File: my_social_model.py    From social_lstm_keras_tf with GNU General Public License v3.0
def __init__(self, config: ModelConfig) -> None:
        self.x_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
        # y_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
        self.grid_input = Input(
            (config.obs_len, config.max_n_peds, config.max_n_peds,
             config.grid_side_squared))
        self.zeros_input = Input(
            (config.obs_len, config.max_n_peds, config.lstm_state_dim))

        # Social LSTM layers
        self.lstm_layer = LSTM(config.lstm_state_dim, return_state=True)
        self.W_e_relu = Dense(config.emb_dim, activation="relu")
        self.W_a_relu = Dense(config.emb_dim, activation="relu")
        self.W_p = Dense(out_dim)

        self._build_model(config) 
Example #27
Source File: deep_models.py    From iust_deep_fuzz with MIT License
def model_0(input_dim, output_dim):
    """
    Total params: 127,584
    Trainable params: 127,584
    Non-trainable params: 0

    :param input_dim:
    :param output_dim:
    :return:
    """
    # build the model: a single LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(128, input_shape=input_dim))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_0'


# Summary of results for model_1 (deep 2):
#
# 
Example #28
Source File: test_keras.py    From docker-python with Apache License 2.0
def test_lstm(self):
        x_train = np.random.random((100, 100, 100))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model = Sequential()
        model.add(LSTM(32, return_sequences=True, input_shape=(100, 100)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))


        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        model.evaluate(x_test, y_test, batch_size=32) 
Example #29
Source File: model.py    From ancient-Chinese-poem-generator with MIT License
def get_training_data(raw_dict, char_to_index):
	'''
	'Generate data for training LSTM from raw data
	'
	'raw_dict: 		original data. struct: {'title':"",'strains':'zzppz$ppzzp$...','paragraphs':"12345$67890$..."}
	'char_to_index: 	dictionary mapping char to index
	'
	'return:
	'	X [input chars sequence]
	'	Y [char label]
	'''
	
	data_X = []
	data_Y = []
	for poem in raw_dict:
		n_chars = len(poem['paragraphs'])
		for i in range(0,n_chars - seq_len,1):
			s_out = poem['paragraphs'][i+seq_len]
			# never output '$'
			if(s_out == '$'):
				continue
			s_in = poem['paragraphs'][i:i+seq_len]
			data_X.append([char_to_index[c] for c in s_in])
			data_Y.append(char_to_index[s_out])
	return data_X,data_Y 
Example #30
Source File: deep_models.py    From iust_deep_fuzz with MIT License
def model_3(input_dim, output_dim):
    """
    Total params: 911,456
    Trainable params: 911,456
    Non-trainable params: 0

    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    # model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    model.add(LSTM(256, input_shape=input_dim, return_sequences=True, dropout=0.4, recurrent_dropout=0.2))
    model.add(LSTM(256, input_shape=input_dim, return_sequences=False, dropout=0.4, recurrent_dropout=0.2))
    # model.add(LSTM(128, activation='relu', dropout=0.2))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_3'


# Summary of results for this model:
# Try 1:
# * When the learning rate is 0.01 and batches are selected sequentially, the loss gets stuck
#   at 2.3454 after about 6 epochs. This is very bad :-(
#
# Try 2:
# So we changed the lr and also the batch selection:
# Result for: lr=0.001, batches selected randomly (shuffle), step=3, batch_size=128, maxlen=50
# * Pretty good with the above config on the small dataset
#
# Try 3:
# Train on the large dataset (prefix chosen from the train-set, not the test-set, in the generation phase):
# Bad results after 6 epochs (the loss decreases suddenly).
#
# Try 4:
# batch_size=256, lr=0.001, step=1, maxlen=50
#
# Overall, not a good model