Python keras.layers.Multiply() Examples

The following are 24 code examples of keras.layers.Multiply(), collected from open-source projects. The source project, author, file, and license are listed above each example. You may also want to check out the other available functions and classes of the keras.layers module.
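Before the project examples, here is a minimal sketch of the layer's behavior (shapes and values are arbitrary): Multiply() takes a list of tensors of identical shape and returns their element-wise product.

import numpy as np
from keras.layers import Input, Multiply
from keras.models import Model

# Multiply() consumes a list of same-shape tensors and emits their element-wise product.
a = Input(shape=(3,))
b = Input(shape=(3,))
out = Multiply()([a, b])
model = Model(inputs=[a, b], outputs=out)

x = np.array([[1.0, 2.0, 3.0]])
y = np.array([[4.0, 5.0, 6.0]])
print(model.predict([x, y]))  # -> [[ 4. 10. 18.]]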
Example #1
Source Project: MobileNetV3   Author: xiaochus   File: mobilenet_base.py   License: MIT License
def _squeeze(self, inputs):
        """Squeeze and Excitation.
        This function defines a squeeze structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
        """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, input_channels))(x)
        x = Multiply()([inputs, x])

        return x 
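A side note on the pattern above: the original Squeeze-and-Excitation design typically bottlenecks the excitation MLP by a reduction ratio before re-expanding. A hedged sketch of that common variant (the ratio r and the function name are illustrative, not from this repo):

from keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply

def _squeeze_with_reduction(inputs, r=4):
    # Same gating as above, but with a channel bottleneck of ratio r
    # in the excitation MLP, as in the original SE-Net design.
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels // r, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)
    return Multiply()([inputs, x])  # broadcasts (1, 1, C) over (H, W, C)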
Example #2
Source Project: Recommender-Systems-Samples   Author: wyl6   File: GMF.py   License: MIT License
def get_model(num_users, num_items, latent_dim, regs=[0,0]):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_regularizer = l2(regs[1]), input_length=1)
    
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    
    predict_vector = Multiply()([user_latent, item_latent])
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name = 'prediction')(predict_vector)
    model = Model(inputs=[user_input, item_input], outputs=prediction)
    
    return model 
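A hedged usage sketch (the user/item counts, optimizer, and loss below are illustrative assumptions; GMF is commonly trained on implicit feedback with binary cross-entropy):

# Illustrative only: counts and training setup are assumptions, not from the repo.
model = get_model(num_users=1000, num_items=2000, latent_dim=8)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()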
Example #3
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: merge_test.py   License: MIT License
def test_merge_multiply():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4) 
Example #4
Source Project: inpainting-gmcnn-keras   Author: tlatkowski   File: discriminator.py   License: MIT License
def model(self):
    inputs_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
    inputs_mask = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    inputs = Multiply()([inputs_img, inputs_mask])
    
    # Local discriminator
    l_dis = Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(inputs)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=512, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Flatten()(l_dis)
    l_dis = Dense(units=1)(l_dis)
    
    model = Model(name=self.model_name, inputs=[inputs_img, inputs_mask], outputs=l_dis)
    return model 
Example #5
Source Project: sfcn-opi   Author: zhuyiche   File: model.py   License: MIT License
def joint_branch(self, trainable=True, softmax_trainable=False):
        """
        joint branch of detection and classification
        :param trainable: unfreeze detection branch layer if set to true
        """
        input_img = Input(shape=self.input_shape)
        x_future_det_one, x_future_cls_det_two = self.share_layer(input_img, trainable=trainable)
        x_detection = self.detection_branch_wrapper(x_future_det_one, x_future_cls_det_two, trainable=trainable,
                                                    softmax_trainable=softmax_trainable)
        x_classification = self.classification_branch_wrapper(x_future_cls_det_two,
                                                              softmax_trainable=softmax_trainable)
        joint_x = Multiply(name='joint_multiply_layer')([x_detection, x_classification])

        joint_model = Model(inputs=input_img,
                            outputs=joint_x)
        return joint_model 
Example #6
def GMF_get_model(num_users, num_items, latent_dim, regs=[0,0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
    item_input = Input(shape=(1,), dtype='int32', name = 'item_input')

    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding',
                                  embeddings_initializer = 'random_normal', embeddings_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding',
                                  embeddings_initializer = 'random_normal', embeddings_regularizer = l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = Multiply()([user_latent, item_latent])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name = 'prediction')(predict_vector)

    model = Model(inputs=[user_input, item_input],
                outputs=prediction)

    return model 
Example #7
Source Project: DeepLearn   Author: GauravBh1010tt   File: eval_fnc.py   License: MIT License
def prepare_model(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500

    # branch 1: dense projection of the first input
    m1 = Dense(out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)

    # branch 2: dense projection of the feature input
    m2 = Dense(50, activation='relu')(inp2)

    # branch 3: dense projection of the tf-idf input
    m3 = Dense(500, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)

    m = Concatenate()([m1, m2, m3])

    # alternative merge kept from the original source:
    # mul = Multiply()([m1, m2])
    # add = Abs()([m1, m2])
    # m = Concatenate()([mul, add])

    score = Dense(nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Example #8
Source Project: DeepLearn   Author: GauravBh1010tt   File: eval_fnc.py   License: MIT License
def prepare_model2(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500

    # branch 1: dense projection of the first input
    m1 = Dense(out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)

    # branch 2: two stacked dense layers over the feature input
    m2 = Dense(n_feats, activation='relu')(inp2)
    m2 = Dense(4, activation='relu')(m2)

    # branch 3: dense projection of the tf-idf input
    m3 = Dense(500, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)

    m = Concatenate()([m1, m2, m3])

    # alternative merge kept from the original source:
    # mul = Multiply()([m1, m2])
    # add = Abs()([m1, m2])
    # m = Concatenate()([mul, add])

    score = Dense(nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Example #9
Source Project: PIEPredict   Author: aras62   File: pie_predict.py   License: Apache License 2.0
def attention_temporal(self, input_data, sequence_length):
        """
        A temporal attention layer
        :param input_data: Network input
        :param sequence_length: Length of the input sequence
        :return: The output of attention layer
        """
        a = Permute((2, 1))(input_data)
        a = Dense(sequence_length, activation='sigmoid')(a)
        a_probs = Permute((2, 1))(a)
        output_attention_mul = Multiply()([input_data, a_probs])
        return output_attention_mul 
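To make the tensor bookkeeping concrete, here is a minimal sketch of the same Permute-Dense-Permute pattern with toy dimensions (seq_len and features are arbitrary):

from keras.layers import Input, Permute, Dense, Multiply
from keras.models import Model

seq_len, features = 15, 64  # toy sizes
inp = Input(shape=(seq_len, features))          # (batch, seq_len, features)
a = Permute((2, 1))(inp)                        # (batch, features, seq_len)
a = Dense(seq_len, activation='sigmoid')(a)     # one weight per timestep, per feature
a_probs = Permute((2, 1))(a)                    # back to (batch, seq_len, features)
out = Multiply()([inp, a_probs])                # rescale the input by the weights
assert Model(inp, out).output_shape == (None, seq_len, features)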
Example #10
Source Project: PIEPredict   Author: aras62   File: pie_predict.py   License: Apache License 2.0
def attention_element(self, input_data, input_dim):
        """
        A self-attention unit
        :param input_data: Network input
        :param input_dim: The feature dimension of the input
        :return: The output of the attention network
        """
        input_data_probs = Dense(input_dim, activation='sigmoid')(input_data)  # sigmoid
        output_attention_mul = Multiply()([input_data, input_data_probs])  # name='att_mul'
        return output_attention_mul 
Example #11
Source Project: social_lstm_keras_tf   Author: t2kasa   File: tf_normal_sampler.py   License: GNU General Public License v3.0
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return
    """

    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)

    # std of x and y
    # std must be positive, which exp() guarantees
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)

    # correlation coefficient, squashed into [-1, 1] by tanh
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)

    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])

    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    xy_cor = Multiply()([x_std, y_std, cor])

    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)

    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)

    return mvn 
Example #12
Source Project: Keras-TextClassification   Author: yongzhuo   File: graph_yoon_kim.py   License: MIT License
def call(self, x):
        dim = K.int_shape(x)[-1]
        transform_gate = self.dense_1(x)
        transform_gate = Activation("sigmoid")(transform_gate)
        carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(transform_gate)
        transformed_data = self.dense_2(x)
        transformed_data = Activation(self.activation)(transformed_data)
        transformed_gated = Multiply()([transform_gate, transformed_data])
        identity_gated = Multiply()([carry_gate, x])
        value = Add()([transformed_gated, identity_gated])
        return value 
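The two Multiply calls above implement the highway formula y = T(x) * H(x) + (1 - T(x)) * x, where T is the transform gate and H the transformation. A self-contained functional-API sketch of the same gating (the width and activations are illustrative assumptions):

from keras.layers import Input, Dense, Lambda, Multiply, Add
from keras.models import Model

dim = 128  # illustrative width
x = Input(shape=(dim,))
t = Dense(dim, activation='sigmoid')(x)   # transform gate T(x)
carry = Lambda(lambda g: 1.0 - g)(t)      # carry gate 1 - T(x)
h = Dense(dim, activation='relu')(x)      # transformation H(x)
y = Add()([Multiply()([t, h]), Multiply()([carry, x])])  # T*H + (1-T)*x
highway = Model(x, y)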
Example #13
Source Project: Deep-Image-Matting   Author: foamliu   File: unpooling_layer.py   License: MIT License
def call(self, inputs, **kwargs):
        x = inputs[:, 1]
        # print('x.shape: ' + str(K.int_shape(x)))
        bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]),
                           output_shape=K.int_shape(x)[1:])(inputs)
        # print('bool_mask.shape: ' + str(K.int_shape(bool_mask)))
        mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
        # print('mask.shape: ' + str(K.int_shape(mask)))
        x = Multiply()([mask, x])
        # print('x.shape: ' + str(K.int_shape(x)))
        return x 
Example #14
Source Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_classify_bi_lstm.py   License: MIT License
def attention(inputs, single_attention_vector=False):
    # attention mechanism
    time_steps = k_keras.int_shape(inputs)[1]
    input_dim = k_keras.int_shape(inputs)[2]
    x = Permute((2, 1))(inputs)
    x = Dense(time_steps, activation='softmax')(x)
    if single_attention_vector:
        x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
        x = RepeatVector(input_dim)(x)

    a_probs = Permute((2, 1))(x)
    output_attention_mul = Multiply()([inputs, a_probs])
    return output_attention_mul 
Example #15
Source Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_classify_text_cnn.py   License: MIT License
def attention(inputs, single_attention_vector=False):
    # attention mechanism
    time_steps = k_keras.int_shape(inputs)[1]
    input_dim = k_keras.int_shape(inputs)[2]
    x = Permute((2, 1))(inputs)
    x = Dense(time_steps, activation='softmax')(x)
    if single_attention_vector:
        x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
        x = RepeatVector(input_dim)(x)

    a_probs = Permute((2, 1))(x)
    output_attention_mul = Multiply()([inputs, a_probs])
    return output_attention_mul 
Example #16
Source Project: NPRF   Author: ucasir   File: nprf_knrm.py   License: Apache License 2.0
def build(self):
    # qd_input = Input((self.config.kernel_size,), name="qd_input")
    dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')
    # z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
    # qd_out = Dense(self.config.out_size, name="qd_out")(z)

    z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
    dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)

    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)

    if self.config.method in [1, 3]:  # no doc gating at this stage
      z = dd_init_out
    elif self.config.method == 2:
      logging.info("Apply doc gating")
      z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
      raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
      logging.info("Dense layer on top")
      z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
      out = Dense(self.config.merge_out, name='score')(z)
    else:
      logging.info("Apply doc gating, No dense layer on top, sum up scores")
      out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_input, dd_gate], outputs=[out])
    model.summary()

    return model 
Example #17
Source Project: Sarcasm-Detection   Author: MirunaPislar   File: dl_models.py   License: MIT License
def stateless_attention_model(**kwargs):
    X = LSTM(kwargs['hidden_units'], kernel_initializer='he_normal', activation='tanh',
             dropout=kwargs['dropout'], return_sequences=True)(kwargs['embeddings'])
    attention_layer = Permute((2, 1))(X)
    attention_layer = Dense(kwargs['max_tweet_length'], activation='softmax')(attention_layer)
    attention_layer = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(attention_layer)
    attention_layer = RepeatVector(int(X.shape[2]))(attention_layer)
    attention_probabilities = Permute((2, 1), name='attention_probs')(attention_layer)
    attention_layer = Multiply()([X, attention_probabilities])
    attention_layer = Flatten()(attention_layer)
    return attention_layer 
Example #18
Source Project: bidaf-keras   Author: ParikhKadam   File: highway_layer.py   License: GNU General Public License v3.0
def call(self, x):
        dim = K.int_shape(x)[-1]
        transform_gate = self.dense_1(x)
        transform_gate = Activation("sigmoid")(transform_gate)
        carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(transform_gate)
        transformed_data = self.dense_2(x)
        transformed_data = Activation(self.activation)(transformed_data)
        transformed_gated = Multiply()([transform_gate, transformed_data])
        identity_gated = Multiply()([carry_gate, x])
        value = Add()([transformed_gated, identity_gated])
        return value 
Example #19
Source Project: CIKM-AnalytiCup-2018   Author: zake7749   File: utils.py   License: Apache License 2.0
def interaction(input_1, input_2):
    "Get the interaction then concatenate results"
    mult = Multiply()([input_1, input_2])
    add = Add()([input_1, input_2])
    sub = substract(input_1, input_2)
    #distance = el_distance(input_1, input_2)
    
    out_ = Concatenate()([sub, mult, add])
    return out_ 
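For intuition, the three merge features in plain numpy (values are arbitrary):

import numpy as np

u = np.array([1.0, 2.0, 3.0])
v = np.array([0.5, 2.0, 1.0])
print(u * v)  # Multiply: [0.5 4.  3. ]
print(u + v)  # Add:      [1.5 4.  4. ]
print(u - v)  # subtract: [0.5 0.  2. ]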
Example #20
Source Project: Look-Into-Person   Author: foamliu   File: unpooling_layer.py   License: MIT License
def call(self, inputs, **kwargs):
        x = inputs[:, 1]
        # print('x.shape: ' + str(K.int_shape(x)))
        bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]),
                           output_shape=K.int_shape(x)[1:])(inputs)
        # print('bool_mask.shape: ' + str(K.int_shape(bool_mask)))
        mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
        # print('mask.shape: ' + str(K.int_shape(mask)))
        x = Multiply()([mask, x])
        # print('x.shape: ' + str(K.int_shape(x)))
        return x 
Example #21
def NeuCF_get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0.0):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
    item_input = Input(shape=(1,), dtype='int32', name = 'item_input')

    # Embedding layer
    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = mf_dim, name = 'mf_embedding_user',
                                  embeddings_initializer = 'random_normal', embeddings_regularizer = l2(reg_mf), input_length=1)
    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = mf_dim, name = 'mf_embedding_item',
                                  embeddings_initializer = 'random_normal', embeddings_regularizer = l2(reg_mf), input_length=1)

    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = int(layers[0]/2), name = "mlp_embedding_user",
                                   embeddings_initializer = 'random_normal', embeddings_regularizer = l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = int(layers[0]/2), name = 'mlp_embedding_item',
                                   embeddings_initializer = 'random_normal', embeddings_regularizer = l2(reg_layers[0]), input_length=1)

    # MF part
    mf_user_latent = Flatten()(MF_Embedding_User(user_input))
    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
    mf_vector = Multiply()([mf_user_latent, mf_item_latent]) # element-wise multiply

    # MLP part
    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
    mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], kernel_regularizer= l2(reg_layers[idx]), activation='relu', name="layer%d" %idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate MF and MLP parts
    predict_vector = Concatenate()([mf_vector, mlp_vector])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name = "prediction")(predict_vector)

    model = Model(inputs=[user_input, item_input],
                  outputs=prediction)

    return model 
Example #22
Source Project: Policy-Gradient-and-Actor-Critic-Keras   Author: Alexander-H-Liu   File: agent_actorcritic.py   License: MIT License
def __init__(self, env, args):
        super(Agent_ActorCritic,self).__init__(env)

        self.log_path = './actor_critic.log'

        self.env = env
        self.actions_available = env.action_space.n
        self.feature_dim = env.observation_space.shape[0]
        self.t = 0
        self.prev_x = None
        self.actor_learning_rate  = 1e-3
        self.critic_learning_rate = 1e-3
        self.gamma = 0.9

        self.dummy_act_picked = np.zeros((1, self.actions_available))

        # Actor
        input_frame = Input(shape=(self.feature_dim,))
        act_picked = Input(shape=(self.actions_available,))
        hidden_f = Dense(20, activation='relu')(input_frame)

        act_prob = Dense(self.actions_available, activation='softmax')(hidden_f)
        selected_act_prob = Multiply()([act_prob, act_picked])
        selected_act_prob = Lambda(lambda x: K.sum(x, axis=-1, keepdims=True), output_shape=(1,))(selected_act_prob)

        model = Model(inputs=[input_frame,act_picked], outputs=[act_prob, selected_act_prob])

        opt = Adam(lr=self.actor_learning_rate)
        model.compile(loss=['mse', categorical_crossentropy], loss_weights=[0.0, 1.0], optimizer=opt)
        self.actor = model

        # Critic
        model = Sequential()
        model.add(Dense(20,activation='relu',input_shape=(self.feature_dim,)))
        model.add(Dense(1))

        opt = Adam(lr=self.critic_learning_rate)
        model.compile(loss='mse', optimizer=opt)
        self.critic = model 
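The Multiply-then-sum over a one-hot action mask simply picks out the probability of the action that was taken; in plain numpy terms (values are arbitrary):

import numpy as np

act_prob = np.array([[0.1, 0.7, 0.2]])    # softmax over 3 actions
act_picked = np.array([[0.0, 1.0, 0.0]])  # one-hot mask: action 1 was taken
selected = np.sum(act_prob * act_picked, axis=-1, keepdims=True)
print(selected)  # [[0.7]]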
Example #23
Source Project: kinship_prediction   Author: CVxTz   File: vgg_face.py   License: MIT License
def baseline_model():
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = VGGFace(model='resnet50', include_top=False)

    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))

    model.summary()

    return model 
Example #24
Source Project: kinship_prediction   Author: CVxTz   File: baseline.py   License: MIT License
def baseline_model():
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = ResNet50(weights='imagenet', include_top=False)

    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))

    model.summary()

    return model