Python tensorflow.python.keras.layers.Embedding() Examples

The following are 7 code examples of tensorflow.python.keras.layers.Embedding(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.keras.layers.
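Before the project-specific examples, here is a minimal standalone sketch of the layer itself; the vocabulary size, output dimension, and input values are illustrative:

import numpy as np
from tensorflow.python.keras.layers import Embedding

# Map integer ids in [0, 1000) to dense 64-dimensional vectors.
layer = Embedding(input_dim=1000, output_dim=64)
ids = np.array([[4, 20, 7]])   # shape (batch, sequence_length)
vectors = layer(ids)           # shape (1, 3, 64) under eager execution
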
Example #1
Source File: rnn.py    From cxplain with MIT License
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
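
The squeeze-then-embed pattern above can be exercised outside the class. A minimal sketch, with illustrative sizes that are not taken from cxplain:

import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Embedding, Input, Lambda
from tensorflow.python.keras.models import Model

inp = Input(shape=(10, 1))                                # (batch, length, 1)
squeezed = Lambda(lambda x: K.squeeze(x, axis=-1))(inp)   # (batch, length)
embedded = Embedding(500, 32, input_length=10)(squeezed)  # (batch, length, 32)
model = Model(inp, embedded)
print(model.predict(np.zeros((2, 10, 1))).shape)          # (2, 10, 32)
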
Example #2
Source File: gcn.py    From GraphNeuralNetwork with MIT License
def GCN(adj_dim, feature_dim, n_hidden, num_class, num_layers=2,
        activation=tf.nn.relu, dropout_rate=0.5, l2_reg=0, feature_less=True):
    Adj = Input(shape=(None,), sparse=True)
    if feature_less:
        X_in = Input(shape=(1,))

        emb = Embedding(adj_dim, feature_dim,
                        embeddings_initializer=Identity(1.0), trainable=False)
        X_emb = emb(X_in)
        h = Reshape([X_emb.shape[-1]])(X_emb)
    else:
        X_in = Input(shape=(feature_dim,))

        h = X_in

    for i in range(num_layers):
        if i == num_layers - 1:
            activation = tf.nn.softmax
            n_hidden = num_class
        h = GraphConvolution(n_hidden, activation=activation,
                             dropout_rate=dropout_rate, l2_reg=l2_reg)([h, Adj])

    output = h
    model = Model(inputs=[X_in, Adj], outputs=output)

    return model 
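
In the feature_less branch, the frozen identity-initialized Embedding is effectively a one-hot lookup over node ids. A minimal sketch of that trick in isolation (the dimension 5 is illustrative):

import numpy as np
from tensorflow.python.keras.initializers import Identity
from tensorflow.python.keras.layers import Embedding

# A non-trainable, identity-initialized embedding acts as a one-hot table.
onehot = Embedding(5, 5, embeddings_initializer=Identity(1.0), trainable=False)
print(onehot(np.array([[2]])))  # row 2 of the 5x5 identity, shape (1, 1, 5)
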
Example #3
Source File: inputs.py    From DeepCTR with Apache License 2.0
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns, seed, l2_reg,
                          prefix='sparse_', seq_mask_zero=True):
    sparse_embedding = {}
    for feat in sparse_feature_columns:
        emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                        embeddings_initializer=feat.embeddings_initializer,
                        embeddings_regularizer=l2(l2_reg),
                        name=prefix + '_emb_' + feat.embedding_name)
        emb.trainable = feat.trainable
        sparse_embedding[feat.embedding_name] = emb

    if varlen_sparse_feature_columns:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                            embeddings_initializer=feat.embeddings_initializer,
                            embeddings_regularizer=l2(l2_reg),
                            name=prefix + '_seq_emb_' + feat.name,
                            mask_zero=seq_mask_zero)
            emb.trainable = feat.trainable
            sparse_embedding[feat.embedding_name] = emb
    return sparse_embedding 
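
A rough usage sketch, assuming create_embedding_dict and its imports (Embedding, l2) are in scope; the namedtuple below is a simplified stand-in for DeepCTR's SparseFeat, not the real class:

from collections import namedtuple

SparseFeat = namedtuple('SparseFeat', ['vocabulary_size', 'embedding_dim',
                                       'embeddings_initializer',
                                       'embedding_name', 'trainable'])

user_id = SparseFeat(vocabulary_size=1000, embedding_dim=8,
                     embeddings_initializer='uniform',
                     embedding_name='user_id', trainable=True)
emb_dict = create_embedding_dict([user_id], [], seed=1024, l2_reg=1e-5)
# emb_dict['user_id'] is a (1000, 8) Embedding layer
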
Example #4
Source File: mlr.py    From icme2019 with MIT License
def get_embedding(region_num, region_feature_dim_dict, base_feature_dim_dict, bias_feature_dim_dict, init_std, seed, l2_reg_linear):

    region_embeddings = [[Embedding(feat.dimension, 1,
                                    embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed + j),
                                    embeddings_regularizer=l2(l2_reg_linear),
                                    name='region_emb_' + str(j) + '_' + str(i))
                          for i, feat in enumerate(region_feature_dim_dict['sparse'])]
                         for j in range(region_num)]
    base_embeddings = [[Embedding(feat.dimension, 1,
                                  embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed + j),
                                  embeddings_regularizer=l2(l2_reg_linear),
                                  name='base_emb_' + str(j) + '_' + str(i))
                        for i, feat in enumerate(base_feature_dim_dict['sparse'])]
                       for j in range(region_num)]
    bias_embedding = [Embedding(feat.dimension, 1,
                                embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed),
                                embeddings_regularizer=l2(l2_reg_linear),
                                name='embed_bias' + '_' + str(i))
                      for i, feat in enumerate(bias_feature_dim_dict['sparse'])]

    return region_embeddings, base_embeddings, bias_embedding 
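
Each Embedding here has output_dim 1, so it acts as a per-feature linear weight table: region_embeddings[j][i] is the table for sparse feature i under region j. A hypothetical call, assuming get_embedding and its imports are in scope, with a namedtuple standing in for the real feature descriptor (only .dimension is used here):

from collections import namedtuple

Feat = namedtuple('Feat', ['dimension'])
dim_dict = {'sparse': [Feat(100), Feat(50)]}
region, base, bias = get_embedding(region_num=4,
                                   region_feature_dim_dict=dim_dict,
                                   base_feature_dim_dict=dim_dict,
                                   bias_feature_dim_dict=dim_dict,
                                   init_std=0.0001, seed=1024,
                                   l2_reg_linear=1e-5)
# region and base are 4 x 2 grids of Embedding layers; bias is a flat list of 2
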
Example #5
Source File: line.py    From GraphEmbedding with MIT License
def create_model(numNodes, embedding_size, order='second'):

    v_i = Input(shape=(1,))
    v_j = Input(shape=(1,))

    first_emb = Embedding(numNodes, embedding_size, name='first_emb')
    second_emb = Embedding(numNodes, embedding_size, name='second_emb')
    context_emb = Embedding(numNodes, embedding_size, name='context_emb')

    v_i_emb = first_emb(v_i)
    v_j_emb = first_emb(v_j)

    v_i_emb_second = second_emb(v_i)
    v_j_context_emb = context_emb(v_j)

    first = Lambda(lambda x: tf.reduce_sum(
        x[0] * x[1], axis=-1, keepdims=False), name='first_order')([v_i_emb, v_j_emb])
    second = Lambda(lambda x: tf.reduce_sum(
        x[0] * x[1], axis=-1, keepdims=False), name='second_order')([v_i_emb_second, v_j_context_emb])

    if order == 'first':
        output_list = [first]
    elif order == 'second':
        output_list = [second]
    else:
        output_list = [first, second]

    model = Model(inputs=[v_i, v_j], outputs=output_list)

    return model, {'first': first_emb, 'second': second_emb} 
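
A quick usage sketch, assuming create_model and its imports from line.py are in scope. The hyperparameters are illustrative, and the loss below mirrors the -log sigmoid(y_true * y_pred) objective LINE uses, which is an assumption about code outside this excerpt:

import tensorflow as tf

model, emb_layers = create_model(numNodes=1000, embedding_size=128, order='first')
model.compile(optimizer='adam',
              loss=lambda y_true, y_pred: -tf.reduce_mean(
                  tf.math.log_sigmoid(y_true * y_pred)))
first_order = emb_layers['first'].get_weights()[0]  # (1000, 128), one row per node
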
Example #6
Source File: backend.py    From FATE with Apache License 2.0
def build(self, lambda_u=0.0001, lambda_v=0.0001, optimizer='rmsprop',
              loss='mse', metrics='mse', initializer='uniform'):
        """
        Init session and create model architecture.
        :param lambda_u: lambda value of l2 norm for user embeddings.
        :param lambda_v: lambda value of l2 norm for item embeddings.
        :param optimizer: optimizer type.
        :param loss: loss type.
        :param metrics: evaluation metrics.
        :param initializer: initializer of embedding
        :return:
        """
        # reference the session property so it is initialized on first use
        sess = self.session
        # user embedding
        user_input_layer = Input(shape=(1,), dtype='int32', name='user_input')
        user_embedding_layer = Embedding(
            input_dim=self.user_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='user_embedding',
            embeddings_regularizer=l2(lambda_u),
            embeddings_initializer=initializer)(user_input_layer)
        user_embedding_layer = Flatten(name='user_flatten')(user_embedding_layer)

        # item embedding
        item_input_layer = Input(shape=(1,), dtype='int32', name='item_input')
        item_embedding_layer = Embedding(
            input_dim=self.item_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='item_embedding',
            embeddings_regularizer=l2(lambda_v),
            embeddings_initializer=initializer)(item_input_layer)
        item_embedding_layer = Flatten(name='item_flatten')(item_embedding_layer)

        # rating prediction
        dot_layer = Dot(axes=-1,
                        name='dot_layer')([user_embedding_layer,
                                           item_embedding_layer])
        self._model = Model(
            inputs=[user_input_layer, item_input_layer], outputs=[dot_layer])

        # compile model; `optimizer` here is a config object exposing
        # .optimizer (the optimizer class name) and .kwargs, despite the
        # string-style default in the signature
        optimizer_instance = getattr(
            tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding for aggregating
        self._trainable_weights = {v.name.split(
            "/")[0]: v for v in self._model.trainable_weights}
        self._aggregate_weights = {
            "user_embedding": self._trainable_weights["user_embedding"]} 
Example #7
Source File: backend.py    From FATE with Apache License 2.0
def _build(self, lambda_u=0.0001, lambda_v=0.0001, optimizer='rmsprop',
           loss='mse', metrics='mse', initializer='uniform'):
        # reference the session property so it is initialized on first use
        sess = self.session

        # user embedding
        user_InputLayer = Input(shape=(1,), dtype='int32', name='user_input')
        user_EmbeddingLayer = Embedding(input_dim=self.user_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='user_embedding',
                                        embeddings_regularizer=l2(lambda_u),
                                        embeddings_initializer=initializer)(user_InputLayer)
        user_EmbeddingLayer = Flatten(name='user_flatten')(user_EmbeddingLayer)

        # user bias
        user_BiasLayer = Embedding(input_dim=self.user_num, output_dim=1, input_length=1,
                                   name='user_bias', embeddings_regularizer=l2(lambda_u),
                                   embeddings_initializer=Zeros())(user_InputLayer)
        user_BiasLayer = Flatten(name='user_bias_flatten')(user_BiasLayer)

        # item embedding
        item_InputLayer = Input(shape=(1,), dtype='int32', name='item_input')
        item_EmbeddingLayer = Embedding(input_dim=self.item_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='item_embedding',
                                        embeddings_regularizer=l2(lambda_v),
                                        embeddings_initializer=initializer)(item_InputLayer)
        item_EmbeddingLayer = Flatten(name='item_flatten')(item_EmbeddingLayer)

        # item bias
        item_BiasLayer = Embedding(input_dim=self.item_num, output_dim=1, input_length=1,
                                   name='item_bias', embeddings_regularizer=l2(lambda_v),
                                   embeddings_initializer=Zeros())(item_InputLayer)
        item_BiasLayer = Flatten(name='item_bias_flatten')(item_BiasLayer)

        # rating prediction
        dotLayer = Dot(axes=-1, name='dot_layer')([user_EmbeddingLayer, item_EmbeddingLayer])

        # add mu, user bias and item bias
        dotLayer = ConstantLayer(mu=self.mu)(dotLayer)
        dotLayer = Add()([dotLayer, user_BiasLayer])
        dotLayer = Add()([dotLayer, item_BiasLayer])

        # create model
        self._model = Model(inputs=[user_InputLayer, item_InputLayer], outputs=[dotLayer])

        # compile model; `optimizer` is a config object exposing .optimizer and .kwargs
        optimizer_instance = getattr(tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding and user_bias for aggregating
        self._trainable_weights = {v.name.split("/")[0]: v for v in self._model.trainable_weights}
        LOGGER.debug(f"trainable weights {self._trainable_weights}")
        self._aggregate_weights = {"user_embedding": self._trainable_weights["user_embedding"],
                                   "user_bias": self._trainable_weights["user_bias"]}