Python tensorflow.python.keras.backend.sum() Examples

The following are 7 code examples of tensorflow.python.keras.backend.sum(). You can go to the original project or source file via the reference above each example. You may also want to check out all available functions/classes of the module tensorflow.python.keras.backend, or try the search function.
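Before the examples, a minimal standalone sketch of the function's behaviour (the values are illustrative; axis and keepdims follow the standard Keras backend signature):

import numpy as np
from tensorflow.python.keras import backend as K

x = K.constant(np.arange(6.0).reshape(2, 3))   # [[0. 1. 2.], [3. 4. 5.]]

K.eval(K.sum(x))                         # 15.0, reduces over all axes
K.eval(K.sum(x, axis=0))                 # [3. 5. 7.], column sums
K.eval(K.sum(x, axis=1, keepdims=True))  # [[3.], [12.]], row sums with rank kept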
Example #1
Source Project: GraphEmbedding   Author: shenweichen   File: sdne.py   License: MIT License
def _create_A_L(self, graph, node2idx):
        # `graph` is a networkx graph; `sp` is scipy.sparse.
        node_size = graph.number_of_nodes()
        A_data = []
        A_row_index = []
        A_col_index = []

        for edge in graph.edges():
            v1, v2 = edge
            edge_weight = graph[v1][v2].get('weight', 1)

            A_data.append(edge_weight)
            A_row_index.append(node2idx[v1])
            A_col_index.append(node2idx[v2])

        # A is the adjacency matrix as stored; A_ is its symmetrised form.
        A = sp.csr_matrix((A_data, (A_row_index, A_col_index)), shape=(node_size, node_size))
        A_ = sp.csr_matrix((A_data + A_data, (A_row_index + A_col_index, A_col_index + A_row_index)),
                           shape=(node_size, node_size))

        # Degree matrix from row sums, then the unnormalised Laplacian L = D - A_.
        D = sp.diags(A_.sum(axis=1).flatten().tolist()[0])
        L = D - A_
        return A, L 
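This method builds the sparse adjacency matrix A and the unnormalised Laplacian L = D - A_ that SDNE's first-order loss consumes, where A_ is the symmetrised adjacency and D its diagonal degree matrix. A standalone sketch of the same computation on a toy graph (the graph and mapping below are illustrative, not from the project):

import networkx as nx
import scipy.sparse as sp

graph = nx.Graph()
graph.add_weighted_edges_from([('a', 'b', 1.0), ('b', 'c', 2.0)])
node2idx = {node: i for i, node in enumerate(graph.nodes())}
n = graph.number_of_nodes()

rows = [node2idx[u] for u, v in graph.edges()]
cols = [node2idx[v] for u, v in graph.edges()]
data = [graph[u][v].get('weight', 1) for u, v in graph.edges()]

A = sp.csr_matrix((data, (rows, cols)), shape=(n, n))
A_sym = A + A.T                      # symmetrise, matching A_ above
D = sp.diags(A_sym.sum(axis=1).A1)   # degree matrix from row sums
L = D - A_sym                        # unnormalised graph Laplacian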
Example #2
Source Project: code2vec   Author: tech-srl   File: keras_attention_layer.py   License: MIT License
def call(self, inputs, **kwargs):
        inputs = inputs if isinstance(inputs, list) else [inputs]

        if len(inputs) < 1 or len(inputs) > 2:
            raise ValueError("AttentionLayer expect one or two inputs.")

        actual_input = inputs[0]
        mask = inputs[1] if len(inputs) > 1 else None
        if mask is not None and not (((len(mask.shape) == 3 and mask.shape[2] == 1) or len(mask.shape) == 2)
                                     and mask.shape[1] == self.input_length):
            raise ValueError("`mask` should be of shape (batch, input_length) or (batch, input_length, 1) "
                             "when calling an AttentionLayer.")

        assert actual_input.shape[-1] == self.attention_param.shape[0]

        # (batch, input_length, input_dim) * (input_dim, 1) ==> (batch, input_length, 1)
        attention_weights = K.dot(actual_input, self.attention_param)

        if mask is not None:
            if len(mask.shape) == 2:
                mask = K.expand_dims(mask, axis=2)  # (batch, input_length, 1)
            mask = K.log(mask)
            attention_weights += mask

        attention_weights = K.softmax(attention_weights, axis=1)  # (batch, input_length, 1)
        result = K.sum(actual_input * attention_weights, axis=1)  # (batch, input_dim)  [multiplication broadcasts over the last axis]
        return result, attention_weights 
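The K.log(mask) step converts a {0, 1} mask into additive {-inf, 0} biases, so padded positions get exactly zero probability after the softmax. A small numeric check of the same trick (standalone, not project code):

from tensorflow.python.keras import backend as K

scores = K.constant([[2.0, 1.0, 3.0]])
mask = K.constant([[1.0, 1.0, 0.0]])  # last position is padding

masked = scores + K.log(mask)         # log(0) = -inf silences the last slot
K.eval(K.softmax(masked, axis=-1))    # approx. [[0.731, 0.269, 0.0]]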
Example #3
Source Project: GraphEmbedding   Author: shenweichen   File: sdne.py   License: MIT License
def l_2nd(beta):
    def loss_2nd(y_true, y_pred):
        # Weight matrix B from the SDNE paper: errors on non-zero entries of
        # the adjacency row are penalised by beta. Tensor ops are used here
        # (tf is tensorflow, K the Keras backend) because y_true is symbolic
        # inside a Keras loss, so numpy indexing would fail.
        b_ = tf.where(tf.not_equal(y_true, 0),
                      beta * tf.ones_like(y_true),
                      tf.ones_like(y_true))
        x = K.square((y_true - y_pred) * b_)
        t = K.sum(x, axis=-1)
        return K.mean(t)

    return loss_2nd 
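This is the second-order proximity loss from the SDNE paper: reconstruction errors on non-zero entries of each adjacency row are up-weighted by beta. Wiring the factory into training is straightforward; the model below is a hypothetical autoencoder stand-in, not the project's (SDNE's real model reconstructs adjacency rows, so input and output widths match the node count):

from tensorflow.python.keras import models, layers

model = models.Sequential([layers.Dense(128, activation='relu', input_shape=(1000,)),
                           layers.Dense(1000)])
model.compile(optimizer='adam', loss=l_2nd(beta=10.0))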
Example #4
Source Project: FATE   Author: FederatedAI   File: losses.py   License: Apache License 2.0
def keep_predict_loss(y_true, y_pred):
    # `ops` is tensorflow.python.framework.ops; with no axis argument,
    # K.sum reduces over all dimensions, giving a scalar inner product.
    y_pred = ops.convert_to_tensor(y_pred)
    return K.sum(y_true * y_pred) 
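With no axis argument, K.sum reduces over every dimension, so this loss is just the scalar inner product of y_true and y_pred. A quick check:

from tensorflow.python.keras import backend as K

a = K.constant([[1.0, 2.0], [3.0, 4.0]])
b = K.constant([[1.0, 0.0], [0.0, 1.0]])
K.eval(K.sum(a * b))  # 5.0, elementwise product summed over all axes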
Example #5
Source Project: FATE   Author: FederatedAI   File: backend.py   License: Apache License 2.0
def call(self, x, mask=None):
        """Masked mean pooling over the time axis.

        1. `mask` is a boolean tensor and must be cast before computing.
        2. `mask` is 2-dimensional with shape (batch_size, time_steps).
        """
        if mask is not None:
            # Broadcast the (batch, time) mask to (batch, time, features).
            mask = K.repeat(mask, x.shape[-1])
            mask = tf.transpose(mask, [0, 2, 1])
            mask = tf.cast(mask, tf.float32)
            x = x * mask
            # Sum the unmasked steps and divide by their count.
            return K.sum(x, axis=1) / K.sum(mask, axis=1)
        else:
            return K.mean(x, axis=1) 
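The masked branch computes a mean over only the unmasked timesteps: the (batch, time) mask is broadcast to the feature dimension and the sum is divided by the per-feature count of valid steps. A standalone numeric check (shapes and values are illustrative):

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = K.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])  # (batch=1, time=3, features=2)
mask = K.constant([[1.0, 1.0, 0.0]])                    # last timestep is padding

m = tf.transpose(K.repeat(mask, 2), [0, 2, 1])          # (1, 3, 2), as in call() above
K.eval(K.sum(x * m, axis=1) / K.sum(m, axis=1))         # [[2. 3.]], mean of steps 0 and 1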
Example #6
Source Project: BVAE-tf   Author: alecGraves   File: sample_layer.py   License: The Unlicense
def call(self, x, training=None):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and logvar')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception('input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        logvar = x[1]

        # trick to allow setting batch at train/eval time
        if mean.shape[0].value is None or logvar.shape[0].value is None:
            return mean + 0*logvar  # Keras needs the *0 so the gradient is not None

        if self.reg is not None:
            # kl divergence:
            latent_loss = -0.5 * (1 + logvar
                                - K.square(mean)
                                - K.exp(logvar))
            latent_loss = K.sum(latent_loss, axis=-1) # sum over latent dimension
            latent_loss = K.mean(latent_loss, axis=0) # avg over batch

            # use beta to force less usage of vector space:
            latent_loss = self.beta * latent_loss
            self.add_loss(latent_loss, x)

        def reparameterization_trick():
            epsilon = K.random_normal(shape=logvar.shape,
                                      mean=0., stddev=1.)
            stddev = K.exp(logvar*0.5)
            return mean + stddev * epsilon

        return K.in_train_phase(reparameterization_trick, mean + 0*logvar, training=training) # TODO figure out why this is not working in the specified tf version??? 
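The regulariser is the closed-form KL divergence between the diagonal Gaussian N(mean, exp(logvar)) and the standard normal, summed over the latent dimension and averaged over the batch; the reparameterization trick rewrites the sample as z = mean + exp(logvar / 2) * eps with eps ~ N(0, 1), so gradients flow through mean and logvar. The same two pieces in isolation (values are illustrative):

from tensorflow.python.keras import backend as K

mean = K.constant([[0.0, 0.5]])
logvar = K.constant([[0.0, -1.0]])

# closed-form KL(N(mean, exp(logvar)) || N(0, I)), summed over latent dims
kl = -0.5 * K.sum(1 + logvar - K.square(mean) - K.exp(logvar), axis=-1)
# K.eval(kl) -> [0.309]

eps = K.random_normal(shape=K.shape(mean))   # eps ~ N(0, 1)
z = mean + K.exp(0.5 * logvar) * eps         # differentiable sample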
Example #7
Source Project: DiPS   Author: malllabiisc   File: util.py   License: Apache License 2.0
def call(self, x, **kwargs):
        # Similarity as the exponentiated negative Manhattan (L1) distance
        # between the two inputs; output shape is (batch, 1).
        self.result = K.exp(-K.sum(K.abs(x[0] - x[1]), axis=1, keepdims=True))
        return self.result
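This layer scores similarity as the exponentiated negative Manhattan (L1) distance, a common head for Siamese networks: identical inputs score 1.0 and the score decays toward 0 as the distance grows. A quick numeric check (standalone):

from tensorflow.python.keras import backend as K

a = K.constant([[1.0, 2.0, 3.0]])
b = K.constant([[1.0, 2.0, 5.0]])
K.eval(K.exp(-K.sum(K.abs(a - b), axis=1, keepdims=True)))  # [[0.1353]], i.e. exp(-2), shape (1, 1)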