Python keras.layers.RepeatVector() Examples

The following are 17 code examples of keras.layers.RepeatVector(), drawn from open-source projects. You can go to the original project or source file via the reference shown above each example. You may also want to check out all other available functions and classes of the module keras.layers.
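As quick orientation before the project examples, here is a minimal self-contained sketch (standalone Keras assumed): RepeatVector(n) takes a 2D input of shape (batch, features) and repeats it n times along a new time axis, producing (batch, n, features).

import numpy as np
from keras.models import Sequential
from keras.layers import RepeatVector

model = Sequential()
model.add(RepeatVector(3, input_shape=(5,)))  # (None, 5) -> (None, 3, 5)

x = np.arange(5, dtype="float32").reshape(1, 5)
print(model.predict(x).shape)  # (1, 3, 5): the same 5-vector stacked 3 times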
Example #1
Source Project: coremltools   Author: apple   File: test_keras.py    License: BSD 3-Clause "New" or "Revised" License
def test_repeat_vector(self):
        from keras.layers import RepeatVector

        model = Sequential()
        model.add(RepeatVector(3, input_shape=(5,)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)
        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))
        # Test the inputs and outputs
        self.assertEqual(len(spec.description.input), len(input_names))
        six.assertCountEqual(
            self, input_names, [x.name for x in spec.description.input]
        )
        self.assertEqual(len(spec.description.output), len(output_names))
        six.assertCountEqual(
            self, output_names, [x.name for x in spec.description.output]
        )
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[0].sequenceRepeat) 
Example #2
Source Project: coremltools   Author: apple   File: test_keras2_numeric.py    License: BSD 3-Clause "New" or "Revised" License
def test_tiny_babi_rnn(self):
        vocab_size = 10
        embed_hidden_size = 8
        story_maxlen = 5
        query_maxlen = 5

        input_tensor_1 = Input(shape=(story_maxlen,))
        x1 = Embedding(vocab_size, embed_hidden_size)(input_tensor_1)
        x1 = Dropout(0.3)(x1)

        input_tensor_2 = Input(shape=(query_maxlen,))
        x2 = Embedding(vocab_size, embed_hidden_size)(input_tensor_2)
        x2 = Dropout(0.3)(x2)
        x2 = LSTM(embed_hidden_size, return_sequences=False)(x2)
        x2 = RepeatVector(story_maxlen)(x2)

        x3 = add([x1, x2])
        x3 = LSTM(embed_hidden_size, return_sequences=False)(x3)
        x3 = Dropout(0.3)(x3)
        x3 = Dense(vocab_size, activation="softmax")(x3)

        model = Model(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])

        self._test_model(model, one_dim_seq_flags=[True, True]) 
Example #3
Source Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_repeat_vector(self):
        from keras.layers import RepeatVector

        model = Sequential()
        model.add(RepeatVector(3, input_shape=(5,)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)
        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))
        # Test the inputs and outputs
        self.assertEqual(len(spec.description.input), len(input_names))
        self.assertEqual(
            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
        )
        self.assertEqual(len(spec.description.output), len(output_names))
        self.assertEqual(
            sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))
        )
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[0].sequenceRepeat) 
Example #4
Source Project: plastering   Author: plastering   File: ir2tagsets_seq.py    License: MIT License
def fit_dep(self, x, y=None):
        timesteps = x.shape[1]
        input_dim = x.shape[2]
        inputs = Input(shape=(timesteps, input_dim))
        encoded = LSTM(self.latent_dim)(inputs)

        decoded = RepeatVector(timesteps)(encoded)
        decoded = LSTM(input_dim, return_sequences=True)(decoded)

        encoded_input = Input(shape=(self.latent_dim,))

        self.sequence_autoencoder = Model(inputs, decoded)
        self.encoder = Model(inputs, encoded)

        self.sequence_autoencoder.compile(
            #loss='binary_crossentropy',
            loss='categorical_crossentropy',
            optimizer='RMSprop',
            metrics=['binary_accuracy']
        )
        self.sequence_autoencoder.fit(x, x) 
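A hedged usage sketch for the autoencoder above (the instance name and array shapes are hypothetical, not from the project): fit_dep trains the autoencoder to reproduce its input, after which self.encoder maps each sequence to a fixed-size embedding.

import numpy as np

x = np.random.random((100, 12, 40))         # (samples, timesteps, input_dim)
tagger.fit_dep(x)                           # 'tagger' is a hypothetical instance of the class
embeddings = tagger.encoder.predict(x)      # (100, latent_dim)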
Example #5
Source Project: cakechat   Author: lukalabs   File: layers.py    License: Apache License 2.0
def repeat_vector(inputs):
    """
    Temporary solution:
    Use this function within a Lambda layer to repeat a vector along a variable first dimension (seq_len).
    The result can then be fed to a Concatenate layer.

    inputs == (layer_for_repeat, layer_for_getting_rep_num):
        layer_for_repeat:           shape == (batch_size, vector_dim)
        layer_for_getting_rep_num:  shape == (batch_size, seq_len, ...)
    :return:
        repeated layer_for_repeat, shape == (batch_size, seq_len, vector_dim)
    """
    layer_for_repeat, layer_for_getting_rep_num = inputs
    repeated_vector = RepeatVector(
        n=K.shape(layer_for_getting_rep_num)[1], name='custom_repeat_vector')(layer_for_repeat)
    # shape == (batch_size, seq_len, vector_dim)
    return repeated_vector 
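A hedged usage sketch of the helper above (the tensor names are hypothetical): because the repeat count is a symbolic shape, the call has to be wrapped in a Lambda layer, as the docstring suggests.

from keras.layers import Lambda

# decoder_state: (batch_size, vector_dim); encoder_seq: (batch_size, seq_len, ...)
repeated = Lambda(repeat_vector)([decoder_state, encoder_seq])
# repeated: (batch_size, seq_len, vector_dim), ready for a Concatenate layer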
Example #6
Source Project: Image-Captioning   Author: Shobhit20   File: SceneDesc.py    License: MIT License
def create_model(self, ret_model=False):
        image_model = Sequential()
        image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
        image_model.add(RepeatVector(self.max_length))

        lang_model = Sequential()
        lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_length))
        lang_model.add(LSTM(256, return_sequences=True))
        lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

        model = Sequential()
        model.add(Merge([image_model, lang_model], mode='concat'))
        model.add(LSTM(1000, return_sequences=False))
        model.add(Dense(self.vocab_size))
        model.add(Activation('softmax'))

        print("Model created!")

        if ret_model:
            return model

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        return model
Example #7
Source Project: Image-Caption-Generator   Author: dabasajay   File: model.py    License: MIT License
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dense(embedding_size, activation='relu')(image_input)
	image_model = RepeatVector(max_len)(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to the same length; the mask tells downstream layers to ignore the padded zeros (an efficiency gain).
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	# Since we are going to predict the next word using the previous words
	# (length of previous words changes with every iteration over the caption), we have to set return_sequences = True.
	caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
	# caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
	caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	# final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
	final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
	# final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
	# final_model = Dense(vocab_size, activation='softmax')(final_model_3)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
	return model 
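A hedged usage sketch (the config values are illustrative, not taken from the project):

rnnConfig = {'embedding_size': 300, 'LSTM_units': 256}
model = AlternativeRNNModel(vocab_size=5000, max_len=34,
                            rnnConfig=rnnConfig, model_type='inceptionv3')
model.summary()  # expects [image_features, caption_sequences] as its two inputs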
Example #8
Source Project: caption_generator   Author: anuragmishracse   File: caption_generator.py    License: MIT License
def create_model(self, ret_model = False):
        #base_model = VGG16(weights='imagenet', include_top=False, input_shape = (224, 224, 3))
        #base_model.trainable=False
        image_model = Sequential()
        #image_model.add(base_model)
        #image_model.add(Flatten())
        image_model.add(Dense(EMBEDDING_DIM, input_dim = 4096, activation='relu'))

        image_model.add(RepeatVector(self.max_cap_len))

        lang_model = Sequential()
        lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
        lang_model.add(LSTM(256,return_sequences=True))
        lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

        model = Sequential()
        model.add(Merge([image_model, lang_model], mode='concat'))
        model.add(LSTM(1000,return_sequences=False))
        model.add(Dense(self.vocab_size))
        model.add(Activation('softmax'))

        print "Model created!"

        if(ret_model==True):
            return model

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        return model 
Example #9
Source Project: coremltools   Author: apple   File: test_recurrent_stress_tests.py    License: BSD 3-Clause "New" or "Revised" License
def _test_one_to_many(self, keras_major_version):
        params = (
            dict(
                input_dims=[1, 10],
                activation="tanh",
                return_sequences=False,
                output_dim=3,
            ),
        )
        number_of_times = 4
        model = Sequential()
        model.add(RepeatVector(number_of_times, input_shape=(10,)))

        if keras_major_version == 2:
            model.add(
                LSTM(
                    params[0]["output_dim"],
                    input_shape=params[0]["input_dims"],
                    activation=params[0]["activation"],
                    recurrent_activation="sigmoid",
                    return_sequences=True,
                )
            )
        else:
            model.add(
                LSTM(
                    output_dim=params[0]["output_dim"],
                    activation=params[0]["activation"],
                    inner_activation="sigmoid",
                    return_sequences=True,
                )
            )
        relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
        # print relative_error, '\n', keras_preds, '\n', coreml_preds, '\n'
        for i in range(len(relative_error)):
            self.assertLessEqual(relative_error[i], 0.01) 
Example #10
Source Project: stock-price-predict   Author: kaka-lin   File: seq2seq.py    License: MIT License
def base_model(feature_len=1, after_day=1, input_shape=(20, 1)):
    model = Sequential()

    model.add(LSTM(units=100, return_sequences=False, input_shape=input_shape))
    #model.add(LSTM(units=100, return_sequences=False, input_shape=input_shape))

    # one to many
    model.add(RepeatVector(after_day))
    model.add(LSTM(200, return_sequences=True))
    #model.add(LSTM(50, return_sequences=True))

    model.add(TimeDistributed(Dense(units=feature_len, activation='linear')))

    return model 
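A hedged usage sketch (shapes and hyperparameters are illustrative): the LSTM encoder compresses a 20-step window into a single state, RepeatVector copies that state once per forecast day to seed the decoder, and TimeDistributed(Dense) emits one value per day.

import numpy as np

model = base_model(feature_len=1, after_day=5, input_shape=(20, 1))
model.compile(loss='mean_squared_error', optimizer='adam')

x = np.random.random((64, 20, 1))   # 64 windows of 20 past prices
y = np.random.random((64, 5, 1))    # the 5 following days for each window
model.fit(x, y, epochs=1, batch_size=16)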
Example #11
Source Project: stock-price-predict   Author: kaka-lin   File: seq2seq_attention_2.py    License: MIT License
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the inputs of your model with a shape (Tx, feature)
    X = Input(shape=input_shape)

    # Initialize empty list of outputs
    all_outputs = []

    # Encoder: pre-attention LSTM
    encoder = LSTM(units=100, return_state=True, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation="relu")
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs, s, c = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator, densor, activator, dotor)

        a, s, c = decoder(context, initial_state=[s, c])

        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=X, outputs=all_outputs)

    return model 
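This example (and the next one) calls a one_step_attention helper defined elsewhere in the project. A hedged reconstruction, inferred from the layers passed into it (this is the standard attention recipe those layers imply, not the project's verbatim code):

def one_step_attention(encoder_outputs, s_prev, repeator, concatenator, densor, activator, dotor):
    # Repeat the previous decoder state across every encoder time step: (batch, time_step, units)
    s_prev = repeator(s_prev)
    # Join the repeated state with the encoder outputs along the feature axis
    concat = concatenator([encoder_outputs, s_prev])
    # One energy score per time step, normalized over time by the softmax activator
    energies = densor(concat)
    alphas = activator(energies)
    # Attention-weighted sum of encoder outputs -> context of shape (batch, 1, units)
    context = dotor([alphas, encoder_outputs])
    return context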
Example #12
Source Project: stock-price-predict   Author: kaka-lin   File: seq2seq_attention.py    License: MIT License
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the inputs of your model with a shape (Tx, feature)
    X = Input(shape=input_shape)
    s0 = Input(shape=(100, ), name='s0')
    c0 = Input(shape=(100, ), name='c0')
    s = s0
    c = c0

    # Initialize empty list of outputs
    all_outputs = []

    # Encoder: pre-attention LSTM
    encoder = LSTM(units=100, return_state=False, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation="relu")
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator, densor, activator, dotor)

        a, s, c = decoder(context, initial_state=[s, c])

        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=[X, s0, c0], outputs=all_outputs)

    return model 
Example #13
Source Project: keras-neural-graph-fingerprint   Author: keiserlab   File: layers.py    License: MIT License
def call(self, inputs, mask=None):
        # Import (symbolic) dimensions
        max_atoms = K.shape(inputs)[1]

        # By [farizrahman4u](https://github.com/fchollet/keras/issues/3995)
        # Create a (batch, num_features) tensor of ones, sample a single dropout
        # mask from it, then repeat that mask across the atom axis so every atom
        # shares the same mask (instead of a fresh mask per atom).
        ones = layers.Lambda(lambda x: (x * 0 + 1)[:, 0, :], output_shape=lambda s: (s[0], s[2]))(inputs)
        dropped = self.dropout_layer(ones)
        dropped = layers.RepeatVector(max_atoms)(dropped)
        return layers.Lambda(lambda x: x[0] * x[1], output_shape=lambda s: s[0])([inputs, dropped])
Example #14
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: core_test.py    License: MIT License
def test_repeat_vector():
    layer_test(layers.RepeatVector,
               kwargs={'n': 3},
               input_shape=(3, 2)) 
Example #15
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #16
Source Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_classify_bi_lstm.py    License: MIT License
def attention(inputs, single_attention_vector=False):
    # attention mechanism
    time_steps = k_keras.int_shape(inputs)[1]
    input_dim = k_keras.int_shape(inputs)[2]
    x = Permute((2, 1))(inputs)
    x = Dense(time_steps, activation='softmax')(x)
    if single_attention_vector:
        x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
        x = RepeatVector(input_dim)(x)

    a_probs = Permute((2, 1))(x)
    output_attention_mul = Multiply()([inputs, a_probs])
    return output_attention_mul 
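A hedged usage sketch (shapes and sizes are illustrative): the helper returns the input sequence rescaled by one learned softmax weight per time step, so it slots directly between a recurrent encoder and any downstream pooling or dense layers.

from keras.layers import Input, LSTM, Bidirectional

seq_input = Input(shape=(50, 128))                         # (batch, time_steps, input_dim)
encoded = Bidirectional(LSTM(64, return_sequences=True))(seq_input)
weighted = attention(encoded, single_attention_vector=True)
# weighted has the same shape as encoded: (batch, 50, 128)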
Example #17
Source Project: Sarcasm-Detection   Author: MirunaPislar   File: dl_models.py    License: MIT License
def stateless_attention_model(**kwargs):
    X = LSTM(kwargs['hidden_units'], kernel_initializer='he_normal', activation='tanh',
             dropout=kwargs['dropout'], return_sequences=True)(kwargs['embeddings'])
    attention_layer = Permute((2, 1))(X)
    attention_layer = Dense(kwargs['max_tweet_length'], activation='softmax')(attention_layer)
    attention_layer = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(attention_layer)
    attention_layer = RepeatVector(int(X.shape[2]))(attention_layer)
    attention_probabilities = Permute((2, 1), name='attention_probs')(attention_layer)
    attention_layer = Multiply()([X, attention_probabilities])
    attention_layer = Flatten()(attention_layer)
    return attention_layer
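A hedged usage sketch (all values illustrative): the helper returns a flattened, attention-weighted encoding that a classifier head can consume.

from keras.layers import Input, Embedding, Dense
from keras.models import Model

tweet_input = Input(shape=(50,))
embeddings = Embedding(10000, 128)(tweet_input)
encoding = stateless_attention_model(hidden_units=64, dropout=0.3,
                                     max_tweet_length=50, embeddings=embeddings)
output = Dense(2, activation='softmax')(encoding)
model = Model(inputs=tweet_input, outputs=output)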