Python keras.layers.dot() Examples

The following are 8 code examples of keras.layers.dot(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module keras.layers, or try the search function.
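Before the project examples, here is a minimal, self-contained sketch of how keras.layers.dot() is typically called. It is illustrative only and not taken from any of the projects below; it assumes the standalone Keras 2.x functional API. The axes argument selects which dimension of each input is contracted, and normalize=True L2-normalizes the inputs along that axis first, turning the dot product into a cosine similarity.

import numpy as np
from keras.layers import Input, dot
from keras.models import Model

# Two batches of 4-dimensional vectors.
a = Input(shape=(4,))
b = Input(shape=(4,))

# Plain inner product over axis 1 -> output shape (batch_size, 1).
similarity = dot([a, b], axes=1)

# Cosine similarity: inputs are L2-normalized along the dot axis first.
cosine = dot([a, b], axes=1, normalize=True)

model = Model(inputs=[a, b], outputs=[similarity, cosine])
x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
sim, cos = model.predict([x1, x2])   # each result has shape (2, 1)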
Example #1
Source Project: coremltools   Author: apple   File: test_keras2_numeric.py    License: BSD 3-Clause "New" or "Revised" License
def test_tiny_cos_random(self):
        np.random.seed(1988)
        input_dim = 10
        num_channels = 6

        # Define a model
        input_tensor = Input(shape=(input_dim,))
        x1 = Dense(num_channels)(input_tensor)
        x2 = Dense(num_channels)(x1)
        x3 = Dense(num_channels)(x1)
        x4 = dot([x2, x3], axes=-1, normalize=True)
        x5 = Dense(num_channels)(x4)

        model = Model(inputs=[input_tensor], outputs=[x5])

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_model(model) 
Example #2
Source Project: keras-recommender   Author: chen0040   File: cf.py    License: MIT License
def create_model(self):
        user_id_input = Input(shape=[1], name='user')
        item_id_input = Input(shape=[1], name='item')

        user_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_user_id + 1,
                                   input_length=1, name='user_embedding')(user_id_input)
        item_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_item_id + 1,
                                   input_length=1, name='item_embedding')(item_id_input)

        # reshape from shape: (batch_size, input_length, embedding_size)
        # to shape: (batch_size, input_length * embedding_size) which is
        # equal to shape: (batch_size, embedding_size)
        user_vecs = Flatten()(user_embedding)
        item_vecs = Flatten()(item_embedding)

        # y = merge([user_vecs, item_vecs], mode='dot', output_shape=(1,))
        y = dot([user_vecs, item_vecs], axes=1)

        model = Model(inputs=[user_id_input, item_id_input], outputs=[y])
        model.compile(optimizer='adam', loss='mae')

        return model 
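Here, dot([user_vecs, item_vecs], axes=1) computes, for each example in the batch, the inner product of the user and item embedding vectors, i.e. a classic matrix-factorization rating prediction of shape (batch_size, 1). A small NumPy check of that contraction (a sketch with made-up sizes, not code from the original project):

import numpy as np

batch_size, embedding_size = 3, 50           # illustrative sizes
user_vecs = np.random.rand(batch_size, embedding_size)
item_vecs = np.random.rand(batch_size, embedding_size)

# What dot(..., axes=1) produces per example: a row-wise inner product.
y = np.sum(user_vecs * item_vecs, axis=1, keepdims=True)
print(y.shape)   # (3, 1)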
Example #3
Source Project: contextual-multimodal-fusion   Author: soujanyaporia   File: trimodal_attention_models.py    License: MIT License
def self_attention(x):
    
    ''' 
    .  stands for dot product 
    *  stands for element-wise multiplication
        
    m = x . transpose(x)
    n = softmax(m)
    o = n . x  
    a = o * x           
       
    return a
        
    '''

    m = dot([x, x], axes=[2,2])
    n = Activation('softmax')(m)
    o = dot([n, x], axes=[2,1])
    a = multiply([o, x])
        
    return a 
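For reference, with x of shape (batch, T, D), dot([x, x], axes=[2, 2]) yields a (batch, T, T) similarity matrix, the softmax normalizes each row into attention weights, and dot([n, x], axes=[2, 1]) maps those weights back onto the sequence, giving (batch, T, D) before the element-wise gate. A minimal shape check of this pattern (illustrative sizes, not from the original project):

from keras.layers import Input, Activation, dot, multiply
from keras.models import Model

T, D = 20, 64                       # sequence length and feature size (arbitrary)
x = Input(shape=(T, D))
m = dot([x, x], axes=[2, 2])        # (batch, T, T) pairwise similarities
n = Activation('softmax')(m)        # attention weights per time step
o = dot([n, x], axes=[2, 1])        # (batch, T, D) attended sequence
a = multiply([o, x])                # (batch, T, D) gated output
Model(x, a).summary()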
Example #4
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: merge_test.py    License: MIT License
def test_merge_dot():
    i1 = layers.Input(shape=(4,))
    i2 = layers.Input(shape=(4,))
    o = layers.dot([i1, i2], axes=1)
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)

    dot_layer = layers.Dot(axes=1)
    o2 = dot_layer([i1, i2])
    assert dot_layer.output_shape == (None, 1)

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    assert_allclose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = layers.dot([i1, i2], axes=(-1, -1))
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    assert_allclose(out, expected, atol=1e-4) 
Example #5
Source Project: funcom   Author: mcmillco   File: ast_attendgru_xtra.py    License: GNU General Public License v3.0
def create_model(self):
        
        dat_input = Input(shape=(self.tdatlen,))
        com_input = Input(shape=(self.comlen,))
        sml_input = Input(shape=(self.smllen,))
        
        ee = Embedding(output_dim=self.embdims, input_dim=self.tdatvocabsize, mask_zero=False)(dat_input)
        se = Embedding(output_dim=self.smldims, input_dim=self.smlvocabsize, mask_zero=False)(sml_input)

        se_enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        seout, state_sml = se_enc(se)

        enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        encout, state_h = enc(ee, initial_state=state_sml)
        
        de = Embedding(output_dim=self.embdims, input_dim=self.comvocabsize, mask_zero=False)(com_input)
        dec = CuDNNGRU(self.recdims, return_sequences=True)
        decout = dec(de, initial_state=state_h)

        attn = dot([decout, encout], axes=[2, 2])
        attn = Activation('softmax')(attn)
        context = dot([attn, encout], axes=[2, 1])

        ast_attn = dot([decout, seout], axes=[2, 2])
        ast_attn = Activation('softmax')(ast_attn)
        ast_context = dot([ast_attn, seout], axes=[2, 1])

        context = concatenate([context, decout, ast_context])

        out = TimeDistributed(Dense(self.recdims, activation="relu"))(context)

        out = Flatten()(out)
        out = Dense(self.comvocabsize, activation="softmax")(out)
        
        model = Model(inputs=[dat_input, com_input, sml_input], outputs=out)

        if self.config['multigpu']:
            model = keras.utils.multi_gpu_model(model, gpus=2)
        
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        return self.config, model 
Example #6
Source Project: contextual-multimodal-fusion   Author: soujanyaporia   File: trimodal_attention_models.py    License: MIT License
def bi_modal_attention(x, y):
    
    ''' 
    .  stands for dot product 
    *  stands for element-wise multiplication
    {} stands for concatenation
        
    m1 = x . transpose(y) ||  m2 = y . transpose(x) 
    n1 = softmax(m1)      ||  n2 = softmax(m2)
    o1 = n1 . y           ||  o2 = n2 . x
    a1 = o1 * x           ||  a2 = o2 * y
       
    return {a1, a2}
        
    '''
     
    m1 = dot([x, y], axes=[2, 2])
    n1 = Activation('softmax')(m1)
    o1 = dot([n1, y], axes=[2, 1])
    a1 = multiply([o1, x])

    m2 = dot([y, x], axes=[2, 2])
    n2 = Activation('softmax')(m2)
    o2 = dot([n2, x], axes=[2, 1])
    a2 = multiply([o2, y])

    return concatenate([a1, a2]) 
Example #7
Source Project: Fabrik   Author: Cloud-CV   File: layers_export.py    License: GNU General Public License v3.0
def eltwise(layer, layer_in, layerId):
    out = {}
    if (layer['params']['layer_type'] == 'Multiply'):
        # This input reverse is to handle visualization
        out[layerId] = multiply(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Sum'):
        out[layerId] = add(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Average'):
        out[layerId] = average(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Dot'):
        out[layerId] = dot(layer_in[::-1], -1)
    else:
        out[layerId] = maximum(layer_in[::-1])
    return out 
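Note that dot(layer_in[::-1], -1) passes axes=-1 positionally, so the two incoming tensors are contracted along their last dimension. A hypothetical call into this dispatcher (the layer dict, tensor names, and shapes here are illustrative, not taken from Fabrik):

from keras import backend as K
from keras.layers import Input

a = Input(shape=(8,))
b = Input(shape=(8,))
out = eltwise({'params': {'layer_type': 'Dot'}}, [a, b], 'dot_1')
print(K.int_shape(out['dot_1']))   # (None, 1)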
Example #8
Source Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    License: MIT License
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    # Sequential paradigm
    if paradigm == 'Sequential':
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        # merge the pivot and context models
        model = Sequential()
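        # NOTE: Merge(mode='dot') is the legacy Keras 1 merge layer; it was
        # removed in later Keras 2 releases, where keras.layers.Dot (or the
        # functional keras.layers.dot used below) is the replacement.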
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    # Functional paradigm
    elif paradigm == 'Functional':
        target = Input(shape=(1,), name='target')
        context = Input(shape=(1,), name='context')
        #print target.shape, context.shape
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
        embedding_target = shared_embedding(target)
        embedding_context = shared_embedding(context)
        #print embedding_target.shape, embedding_context.shape

        merged_vector = dot([embedding_target, embedding_context], axes=-1)
        reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
        #print merged_vector.shape
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
        #print prediction.shape

        model = Model(inputs=[target, context], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    else:
        print('paradigm error')
        return None
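A possible training sketch for the functional variant above, using keras.preprocessing.sequence.skipgrams to generate (target, context) pairs with negative samples. The corpus and hyperparameters here are toy placeholders, not taken from MovieTaster-Open.

import numpy as np
from keras.preprocessing.sequence import skipgrams

vocab_size = 1000
model = skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional')

corpus = np.random.randint(1, vocab_size, size=10000)        # toy item-id sequence
couples, labels = skipgrams(corpus, vocab_size, window_size=4)
targets, contexts = zip(*couples)

model.fit([np.array(targets, dtype='int32').reshape(-1, 1),
           np.array(contexts, dtype='int32').reshape(-1, 1)],
          np.array(labels).reshape(-1, 1),
          epochs=1, batch_size=256)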