Python keras.layers.merge.Multiply() Examples

The following are 3 code examples of keras.layers.merge.Multiply(), drawn from open-source projects. The source file, project, and license for each example are noted above its code. You may also want to check out the other functions and classes available in the keras.layers.merge module.
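Before the project-specific examples, here is a minimal sketch of the layer itself (the shapes are arbitrary): Multiply() takes a list of tensors that all have the same shape and returns their element-wise product, again with that shape.

from keras.layers import Input
from keras.layers.merge import Multiply
from keras.models import Model

a = Input(shape=(16,))
b = Input(shape=(16,))

# Element-wise product of the two inputs; output shape matches the inputs.
product = Multiply()([a, b])

model = Model([a, b], product)
model.summary()    # output shape: (None, 16)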
Example #1
Source File: attention_model.py    From neural-tweet-search with Apache License 2.0
# Keras layers used below; repeat_vector, attention_weighting_prod, the
# pooling helpers, and ATTENTION_DEEP_LEVEL are defined elsewhere in
# attention_model.py.
from keras import backend as K
from keras.layers import Activation, Dense, Dot, Lambda, Reshape
from keras.layers.merge import Multiply

def add_attention_layer_with_doc_weighting(query_embedding, doc_embedding, layer_name, attention_level,
                                           query_weight, doc_weight, max_query_len, max_doc_len,
                                           query_mask=None, doc_mask=None, mask=False):
    dot_prod = Dot(axes=-1, name=layer_name)([doc_embedding, query_embedding])
    norm_sim = Activation('softmax')(dot_prod)
    reshaped_query_weight = Reshape((ATTENTION_DEEP_LEVEL, max_query_len, 1),
                                    input_shape=(ATTENTION_DEEP_LEVEL, max_query_len,))(query_weight)
    repeated_query_weight = Lambda(lambda x: repeat_vector(x[0], x[1], x[2]), output_shape=lambda inp_shp:(
        inp_shp[0][0], inp_shp[0][1], max_query_len, max_doc_len,))([reshaped_query_weight, max_doc_len, -1])
    reshaped_doc_weight = Reshape((ATTENTION_DEEP_LEVEL, 1, max_doc_len),
                                  input_shape=(ATTENTION_DEEP_LEVEL, max_doc_len,))(doc_weight)
    repeated_doc_weight = Lambda(lambda x: repeat_vector(x[0], x[1], x[2]), output_shape=lambda inp_shp: (
        inp_shp[0][0], inp_shp[0][1], max_query_len, max_doc_len,))([reshaped_doc_weight, max_query_len, -2])
    weight_product = Multiply()([repeated_query_weight, repeated_doc_weight])
    transformed_weight_product = Dense(max_doc_len, activation='relu')(weight_product)
    setattr(K, 'params', {'attention_level': attention_level})
    norm_sim = Lambda(lambda x: attention_weighting_prod(x[0], x[1]), output_shape=lambda inp_shp:(
        inp_shp[0][0], inp_shp[0][1], inp_shp[0][2]))([norm_sim, transformed_weight_product])
    if mask:
        max_sim = Lambda(lambda x: max_pooling_with_mask(x[0], x[1]), output_shape=lambda inp_shp: (
            inp_shp[0][0], inp_shp[0][2],))([norm_sim, query_mask])
        mean_sim = Lambda(lambda x: mean_pooling_with_mask(x[0], x[1], x[2]), output_shape=lambda inp_shp: (
            inp_shp[0][0], inp_shp[0][2],))([norm_sim, doc_mask, query_mask])
    else:
        max_sim = Lambda(max_pooling, output_shape=lambda inp_shp: (inp_shp[0], inp_shp[2], ))(norm_sim)
        mean_sim = Lambda(mean_pooling, output_shape=lambda inp_shp: (inp_shp[0], inp_shp[2],))(norm_sim)
    return norm_sim, max_sim, mean_sim
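The Multiply() call in Example #1 combines a per-query-term weight vector and a per-document-term weight vector after both have been broadcast to a common (query_len, doc_len) grid. A stripped-down sketch of that idea, using the standard Keras backend function K.repeat_elements in place of the project's repeat_vector helper (all names and sizes below are illustrative):

from keras import backend as K
from keras.layers import Input, Lambda, Reshape
from keras.layers.merge import Multiply
from keras.models import Model

max_query_len, max_doc_len = 4, 6
q_w = Input(shape=(max_query_len,))    # one weight per query term
d_w = Input(shape=(max_doc_len,))      # one weight per document term

# Broadcast both weight vectors to shape (max_query_len, max_doc_len) ...
q_grid = Lambda(lambda x: K.repeat_elements(x, max_doc_len, axis=2))(Reshape((max_query_len, 1))(q_w))
d_grid = Lambda(lambda x: K.repeat_elements(x, max_query_len, axis=1))(Reshape((1, max_doc_len))(d_w))

# ... then Multiply() yields their outer product, one weight per (query term, doc term) pair.
w_prod = Multiply()([q_grid, d_grid])

model = Model([q_w, d_w], w_prod)
model.summary()    # output shape: (None, 4, 6)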


Example #2
Source File: main_crnn_sed.py    From dcase2017_task4_cvssp with MIT License
# Keras layers used below; slice1/slice2 and their output-shape functions are
# defined elsewhere in main_crnn_sed.py and split the feature maps into two
# halves along the channel axis.
from keras.layers import Activation, BatchNormalization, Conv2D, Lambda
from keras.layers.merge import Multiply

def block(input):
    cnn = Conv2D(128, (3, 3), padding="same", activation="linear", use_bias=False)(input)
    cnn = BatchNormalization(axis=-1)(cnn)

    cnn1 = Lambda(slice1, output_shape=slice1_output_shape)(cnn)
    cnn2 = Lambda(slice2, output_shape=slice2_output_shape)(cnn)

    cnn1 = Activation('linear')(cnn1)
    cnn2 = Activation('sigmoid')(cnn2)

    # Gated activation: the linear features are modulated element-wise by the sigmoid gate.
    out = Multiply()([cnn1, cnn2])
    return out 
Example #3
Source File: care_projection.py    From CSBDeep with BSD 3-Clause "New" or "Revised" License
    # Method of the projection CARE model class in care_projection.py; the Keras
    # layers (Input, Conv3D, MaxPooling3D, UpSampling3D, Lambda, Multiply, Model),
    # the softmax activation, the backend K, axes_dict, and nets are imported at
    # the top of that file.
    def _build(self):
        # get parameters
        proj = self.proj_params
        proj_axis = axes_dict(self.config.axes)[proj.axis]

        # define surface projection network (3D -> 2D)
        inp = u = Input(self.config.unet_input_shape)
        def conv_layers(u):
            for _ in range(proj.n_conv_per_depth):
                u = Conv3D(proj.n_filt, proj.kern, padding='same', activation='relu')(u)
            return u
        # down
        for _ in range(proj.n_depth):
            u = conv_layers(u)
            u = MaxPooling3D(proj.pool)(u)
        # middle
        u = conv_layers(u)
        # up
        for _ in range(proj.n_depth):
            u = UpSampling3D(proj.pool)(u)
            u = conv_layers(u)
        u = Conv3D(1, proj.kern, padding='same', activation='linear')(u)
        # convert learned features along Z to surface probabilities
        # (add 1 to proj_axis because of batch dimension in tensorflow)
        u = Lambda(lambda x: softmax(x, axis=1+proj_axis))(u)
        # multiply Z probabilities with Z values in input stack
        u = Multiply()([inp, u])
        # perform surface projection by summing over weighted Z values
        u = Lambda(lambda x: K.sum(x, axis=1+proj_axis))(u)
        model_projection = Model(inp, u)

        # define denoising network (2D -> 2D)
        # (remove projected axis from input_shape)
        input_shape = list(self.config.unet_input_shape)
        del input_shape[proj_axis]
        model_denoising = nets.common_unet(
            n_dim           = self.config.n_dim-1,
            n_channel_out   = self.config.n_channel_out,
            prob_out        = self.config.probabilistic,
            residual        = self.config.unet_residual,
            n_depth         = self.config.unet_n_depth,
            kern_size       = self.config.unet_kern_size,
            n_first         = self.config.unet_n_first,
            last_activation = self.config.unet_last_activation,
        )(tuple(input_shape))

        # chain models together
        return Model(inp, model_denoising(model_projection(inp)))
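A stripped-down version of the projection trick in Example #3 (shapes, filter sizes, and axis choices below are illustrative): a softmax over the Z axis turns learned features into per-voxel weights, Multiply() applies those weights to the input stack, and a sum over Z collapses the 3D volume to a 2D image. This assumes a Keras version whose softmax activation accepts an axis argument.

from keras import backend as K
from keras.activations import softmax
from keras.layers import Conv3D, Input, Lambda
from keras.layers.merge import Multiply
from keras.models import Model

# Toy 3D input: (Z, Y, X, channels) = (8, 32, 32, 1); the batch axis is implicit.
inp = Input(shape=(8, 32, 32, 1))
feat = Conv3D(1, (3, 3, 3), padding='same', activation='linear')(inp)

# Softmax over Z (axis=1 once the batch dimension is counted) gives per-voxel weights.
weights = Lambda(lambda x: softmax(x, axis=1))(feat)

# Weight the input stack element-wise, then sum over Z -> a 2D projection (32, 32, 1).
weighted = Multiply()([inp, weights])
proj = Lambda(lambda x: K.sum(x, axis=1))(weighted)

model = Model(inp, proj)
model.summary()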