Python tensorflow.sparse_tensor_dense_matmul() Examples

The following are 30 code examples of tensorflow.sparse_tensor_dense_matmul(), each excerpted from an open-source project. The source file and project are noted above each example.
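tf.sparse_tensor_dense_matmul multiplies a tf.SparseTensor (left operand) by a dense Tensor (right operand) and returns a dense Tensor; the optional adjoint_a/adjoint_b flags transpose either operand. Before the project examples, here is a minimal, self-contained sketch of the op itself (TF 1.x graph mode, matching the excerpts below; in TF 2.x the same op is exposed as tf.sparse.sparse_dense_matmul). The small matrices are purely illustrative.

import numpy as np
import tensorflow as tf  # TF 1.x API, matching the examples below

# A 3x3 sparse matrix with two non-zero entries.
sp_a = tf.SparseTensor(indices=[[0, 1], [2, 2]],
                       values=[1.0, 2.0],
                       dense_shape=[3, 3])

# A dense 3x2 matrix.
b = tf.constant(np.arange(6, dtype=np.float32).reshape(3, 2))

# Dense result of shape [3, 2]: sp_a @ b.
product = tf.sparse_tensor_dense_matmul(sp_a, b)

with tf.Session() as sess:
    print(sess.run(product))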
Example #1
Source File: model.py    From PathCon with MIT License
def _build_model(self):
        # define initial relation features
        if self.use_context or (self.use_path and self.path_type == 'rnn'):
            self._build_relation_feature()

        self.scores = 0.0

        if self.use_context:
            edges_list, mask_list = self._get_neighbors_and_masks(self.labels, self.entity_pairs, self.train_edges)
            self.aggregators = self._get_neighbor_aggregators()  # define aggregators for each layer
            self.aggregated_neighbors = self._aggregate_neighbors(edges_list, mask_list)  # [batch_size, n_relations]
            self.scores += self.aggregated_neighbors

        if self.use_path:
            if self.path_type == 'embedding':
                self.W, self.b = self._get_weight_and_bias(self.n_paths, self.n_relations)  # [batch_size, n_relations]
                self.scores += tf.sparse_tensor_dense_matmul(self.path_features, self.W) + self.b

            elif self.path_type == 'rnn':
                rnn_output = self._rnn(self.path_ids)  # [batch_size, path_samples, n_relations]
                self.scores += self._aggregate_paths(rnn_output)

        # narrow the range of scores to [0, 1] for ease of calculating ranking-based metrics
        self.scores_normalized = tf.sigmoid(self.scores) 
Example #2
Source File: graphsage.py    From gnn-benchmark with MIT License
def aggregate_maxpool(features, agg_transform_size, adj_with_self_loops_indices, num_features, name):
    with tf.name_scope(name):
        fc_weights = tf.get_variable(f"{name}-fc_weights",
                                     shape=[num_features, agg_transform_size],
                                     dtype=tf.float32,
                                     initializer=tf.glorot_uniform_initializer(),
                                     )
        # dims: num_nodes x num_features, num_features x agg_transform_size -> num_nodes x agg_transform_size
        if isinstance(features, tf.SparseTensor):
            transformed_features = tf.sparse_tensor_dense_matmul(features, fc_weights)
        else:
            transformed_features = tf.matmul(features, fc_weights)
        transformed_features = tf.nn.relu(transformed_features)

        # Spread out the transformed features to neighbours.
        # dims: num_nodes x agg_transform_size, num_nodes x max_degree -> num_nodes x max_degree x agg_transform_size
        neighbours_features = tf.gather(transformed_features, adj_with_self_loops_indices)

        # employ the max aggregator
        output = tf.reduce_max(neighbours_features, axis=1)
        return output


# dims:
#   features: num_nodes x num_features 
Example #3
Source File: layers.py    From GraphSAINT with MIT License
def _call(self, inputs):
        # vecs: input feature of the current layer. 
        # adj_partition_list: the row partitions of the full graph adj 
        #       (only used in full-batch evaluation on the val/test sets)
        vecs, adj_norm, len_feat, adj_partition_list, _ = inputs
        vecs = tf.nn.dropout(vecs, 1-self.dropout)
        vecs_hop = [tf.identity(vecs) for o in range(self.order+1)]
        for o in range(self.order):
            for a in range(o+1):
                ans1 = tf.sparse_tensor_dense_matmul(adj_norm,vecs_hop[o+1])
                ans_partition = [tf.sparse_tensor_dense_matmul(adj,vecs_hop[o+1]) for adj in adj_partition_list]
                ans2 = tf.concat(ans_partition,0)
                vecs_hop[o+1]=tf.cond(self.is_train,lambda: tf.identity(ans1),lambda: tf.identity(ans2))
        vecs_hop = [self._F_nonlinear(v,o) for o,v in enumerate(vecs_hop)]    
        if self.aggr == 'mean':
            ret = vecs_hop[0]
            for o in range(len(vecs_hop)-1):
                ret += vecs_hop[o+1]
        elif self.aggr == 'concat':
            ret = tf.concat(vecs_hop,axis=1)
        else:
            raise NotImplementedError
        return ret 
Example #4
Source File: labelprop.py    From gnn-benchmark with MIT License
def optimize(self, learning_rate, global_step):
        if self.prop_type == 'vanilla':
            # dims: num_nodes x num_nodes, num_nodes x num_labels, num_nodes -> num_nodes x num_labels
            new_predicted_labels = tf.sparse_tensor_dense_matmul(self.graph_adj, self.predicted_labels) / self.degrees
            # set entries where we have a label to zero...
            new_predicted_labels *= self._get_labelled_nodes_mask()
            # ... and add already known labels
            new_predicted_labels += self.initial_predicted_labels
        else:
            new_predicted_labels = (1 - self.return_prob) * tf.sparse_tensor_dense_matmul(self.graph_adj,
                                                                                          self.predicted_labels) \
                                   + self.return_prob * self.initial_predicted_labels

        # update predictions variable
        predicted_labels_update_op = self.predicted_labels.assign(new_predicted_labels)
        return predicted_labels_update_op, global_step.assign_add(1) 
Example #5
Source File: mlp.py    From gnn-benchmark with MIT License
def fully_connected_layer(inputs, output_dim, activation_fn, dropout_prob, weight_decay, name):
    with tf.name_scope(name):
        input_dim = int(inputs.get_shape()[1])
        weights = tf.get_variable("%s-weights" % name, [input_dim, output_dim], dtype=tf.float32,
                                  initializer=tf.glorot_uniform_initializer(),
                                  regularizer=slim.l2_regularizer(weight_decay))

        # Apply dropout to inputs if required
        inputs = tf.cond(
            tf.cast(dropout_prob, tf.bool),
            true_fn=(lambda: dropout_supporting_sparse_tensors(inputs, 1 - dropout_prob)),
            false_fn=(lambda: inputs),
        )

        if isinstance(inputs, tf.SparseTensor):
            output = tf.sparse_tensor_dense_matmul(inputs, weights)
        else:
            output = tf.matmul(inputs, weights)
        output = tf.contrib.layers.bias_add(output)
        return activation_fn(output) if activation_fn else output 
Example #6
Source File: recommendation_graphs.py    From tensorrec with Apache License 2.0
def project_biases(tf_features, n_features):
    """
    Projects the biases from the feature space to calculate bias per actor
    :param tf_features:
    :param n_features:
    :return:
    """
    tf_feature_biases = tf.Variable(tf.zeros([n_features, 1]))

    # The reduce sum is to perform a rank reduction
    tf_projected_biases = tf.reduce_sum(
        tf.sparse_tensor_dense_matmul(tf_features, tf_feature_biases),
        axis=1
    )

    return tf_feature_biases, tf_projected_biases 
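A minimal usage sketch of this helper; the sparse one-hot features below are invented for illustration and are not part of the original project.

# Hypothetical call: 2 actors described by 3 possible features (one-hot).
tf_features = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                              values=[1.0, 1.0],
                              dense_shape=[2, 3])
feature_biases, projected_biases = project_biases(tf_features, n_features=3)
# projected_biases has shape [2]: one scalar bias per actor row.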
Example #7
Source File: model.py    From graph2gauss with MIT License
def __build(self):
        w_init = tf.contrib.layers.xavier_initializer

        sizes = [self.D] + self.n_hidden

        for i in range(1, len(sizes)):
            W = tf.get_variable(name='W{}'.format(i), shape=[sizes[i - 1], sizes[i]], dtype=tf.float32,
                                initializer=w_init())
            b = tf.get_variable(name='b{}'.format(i), shape=[sizes[i]], dtype=tf.float32, initializer=w_init())

            if i == 1:
                encoded = tf.sparse_tensor_dense_matmul(self.X, W) + b
            else:
                encoded = tf.matmul(encoded, W) + b

            encoded = tf.nn.relu(encoded)

        W_mu = tf.get_variable(name='W_mu', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
        b_mu = tf.get_variable(name='b_mu', shape=[self.L], dtype=tf.float32, initializer=w_init())
        self.mu = tf.matmul(encoded, W_mu) + b_mu

        W_sigma = tf.get_variable(name='W_sigma', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
        b_sigma = tf.get_variable(name='b_sigma', shape=[self.L], dtype=tf.float32, initializer=w_init())
        log_sigma = tf.matmul(encoded, W_sigma) + b_sigma
        self.sigma = tf.nn.elu(log_sigma) + 1 + 1e-14 
Example #8
Source File: representation_graphs.py    From tensorrec with Apache License 2.0
def connect_representation_graph(self, tf_features, n_components, n_features, node_name_ending):

        # Infer ReLU layer size if necessary
        if self.relu_size is None:
            relu_size = 4 * n_components
        else:
            relu_size = self.relu_size

        # Create variable nodes
        tf_relu_weights = tf.Variable(tf.random_normal([n_features, relu_size], stddev=.5),
                                      name='relu_weights_{}'.format(node_name_ending))
        tf_relu_biases = tf.Variable(tf.zeros([1, relu_size]),
                                     name='relu_biases_{}'.format(node_name_ending))
        tf_linear_weights = tf.Variable(tf.random_normal([relu_size, n_components], stddev=.5),
                                        name='linear_weights_{}'.format(node_name_ending))

        # Create ReLU layer
        tf_relu = tf.nn.relu(tf.add(tf.sparse_tensor_dense_matmul(tf_features, tf_relu_weights),
                                    tf_relu_biases))
        tf_repr = tf.matmul(tf_relu, tf_linear_weights)

        # Return repr layer and variables
        return tf_repr, [tf_relu_weights, tf_linear_weights, tf_relu_biases] 
Example #9
Source File: sparse_tensor_dense_matmul_op_test.py    From deep_image_model with Apache License 2.0
def testShapeInference(self):
    x = np.random.rand(10, 10)
    x[np.abs(x) < 0.5] = 0  # Make it sparse
    y = np.random.randn(10, 20)
    x_indices = np.vstack(np.where(x)).astype(np.int64).T
    x_values = x[np.where(x)]
    x_shape = x.shape
    x_st = tf.SparseTensor(x_indices, x_values, x_shape)
    result = tf.sparse_tensor_dense_matmul(x_st, y)
    self.assertEqual(result.get_shape(), (10, 20))

    x_shape_unknown = tf.placeholder(dtype=tf.int64, shape=None)
    x_st_shape_unknown = tf.SparseTensor(x_indices, x_values, x_shape_unknown)
    result_left_shape_unknown = tf.sparse_tensor_dense_matmul(
        x_st_shape_unknown, y)
    self.assertEqual(
        result_left_shape_unknown.get_shape().as_list(), [None, 20])

    x_shape_inconsistent = [10, 15]
    x_st_shape_inconsistent = tf.SparseTensor(
        x_indices, x_values, x_shape_inconsistent)
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
      tf.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)

  # Tests setting one dimension to be a high value. 
Example #10
Source File: sparse_tensor_dense_matmul_op_test.py    From deep_image_model with Apache License 2.0
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
    x_ind, x_val, x_shape, y, adjoint_a, adjoint_b):
  sp_x = tf.SparseTensor(indices=x_ind, values=x_val, shape=x_shape)

  def body(t, prev):
    with tf.control_dependencies([prev]):
      return (t + 1,
              sparse_ops.sparse_tensor_dense_matmul(
                  sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))

  t0 = tf.constant(0)
  v0 = tf.constant(0.0)
  def _timeit(iterations, _):
    (_, final) = tf.while_loop(
        lambda t, _: t < iterations, body, (t0, v0),
        parallel_iterations=1, back_prop=False)
    return [final]
  return _timeit 
Example #11
Source File: sparse_tensor_dense_matmul_grad_test.py    From deep_image_model with Apache License 2.0
def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype):
    n, k, m = np.random.randint(1, 10, size=3)
    sp_t, nnz = self._randomTensor(
        [n, k], np_dtype, adjoint=adjoint_a, sparse=True)
    dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)

    matmul = tf.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)

    with self.test_session(use_gpu=True):
      dense_t_shape = [m, k] if adjoint_b else [k, m]
      sp_t_val_shape = [nnz]
      err = tf.test.compute_gradient_error([dense_t, sp_t.values],
                                           [dense_t_shape, sp_t_val_shape],
                                           matmul, [n, m])
      print("%s gradient err = %s" % (name, err))
      self.assertLess(err, 1e-3) 
Example #12
Source File: conf_gcn.py    From ConfGCN with Apache License 2.0
def matmul(self, a, b, is_sparse=False):
		"""
		Performs matrix multiplication between a and b, based on whether a is sparse or not.

		Parameters
		----------
		a, b:		Tensors to multiply
		is_sparse: 	Whether 'a' is sparse or not

		Returns
		-------
		Matrix multiplication output of 'a' and 'b'

		"""
		if is_sparse: 	return tf.sparse_tensor_dense_matmul(a, b)
		else: 		return tf.matmul(a, b) 
Example #13
Source File: chem_tensorflow_gcn.py    From gated-graph-neural-network-samples with MIT License
def compute_final_node_representations(self):
        with tf.variable_scope('gcn_scope'):
            cur_node_states = self.placeholders['initial_node_representation']  # number of nodes in batch v x D
            num_nodes = tf.shape(self.placeholders['initial_node_representation'], out_type=tf.int64)[0]

            adjacency_matrix = tf.SparseTensor(indices=self.placeholders['adjacency_list'],
                                               values=self.placeholders['adjacency_weights'],
                                               dense_shape=[num_nodes, num_nodes])

            for layer_idx in range(self.params['num_timesteps']):
                scaled_cur_node_states = tf.sparse_tensor_dense_matmul(adjacency_matrix, cur_node_states)  # v x D
                new_node_states = tf.matmul(scaled_cur_node_states, self.weights['edge_weights'][layer_idx])

                if self.params['gcn_use_bias']:
                    new_node_states += self.weights['edge_biases'][layer_idx]  # v x D

                # On all but final layer do ReLU and dropout:
                if layer_idx < self.params['num_timesteps'] - 1:
                    new_node_states = tf.nn.relu(new_node_states)
                    new_node_states = tf.nn.dropout(new_node_states, keep_prob=self.placeholders['graph_state_keep_prob'])

                cur_node_states = new_node_states

            return cur_node_states 
Example #14
Source File: layers.py    From DGFraud with Apache License 2.0
def node_attention(inputs, adj, return_weights=False):
        hidden_size = inputs.shape[-1].value
        H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))

        # convert adj to sparse tensor
        zero = tf.constant(0, dtype=tf.float32)
        where = tf.not_equal(adj, zero)
        indices = tf.where(where)
        values = tf.gather_nd(adj, indices)
        adj = tf.SparseTensor(indices=indices,
                              values=values,
                              dense_shape=adj.shape)

        with tf.name_scope('v'):
            v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))

        weights = tf.sparse_softmax(v, name='alphas')  # [nodes,nodes]
        output = tf.sparse_tensor_dense_matmul(weights, inputs)

        if not return_weights:
            return output
        else:
            return output, weights

    # view-level attention (equation (4) in SemiGNN) 
Example #15
Source File: NGCF.py    From neural_graph_collaborative_filtering with MIT License
def _create_gcmc_embed(self):
        A_fold_hat = self._split_A_hat(self.norm_adj)

        embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)

        all_embeddings = []

        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))
            embeddings = tf.concat(temp_embed, 0)
            # convolutional layer.
            embeddings = tf.nn.leaky_relu(tf.matmul(embeddings, self.weights['W_gc_%d' % k]) + self.weights['b_gc_%d' % k])
            # dense layer.
            mlp_embeddings = tf.matmul(embeddings, self.weights['W_mlp_%d' %k]) + self.weights['b_mlp_%d' %k]
            mlp_embeddings = tf.nn.dropout(mlp_embeddings, 1 - self.mess_dropout[k])

            all_embeddings += [mlp_embeddings]
        all_embeddings = tf.concat(all_embeddings, 1)

        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings 
Example #16
Source File: utils.py    From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def matmul_wrapper(A, B, optype):
    """Wrapper for handling sparse and dense versions of `tf.matmul` operation.

    Parameters
    ----------
    A : tf.Tensor
    B : tf.Tensor
    optype : str, {'dense', 'sparse'}

    Returns
    -------
    tf.Tensor
    """
    with tf.name_scope('matmul_wrapper') as scope:
        if optype == 'dense':
            return tf.matmul(A, B)
        elif optype == 'sparse':
            return tf.sparse_tensor_dense_matmul(A, B)
        else:
            raise NameError('Unknown input type in matmul_wrapper') 
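A quick usage sketch of this wrapper; the tensors below are invented for illustration and do not come from the original source.

# Illustrative operands: a 2x4 sparse matrix times a dense 4x3 matrix.
A_sp = tf.SparseTensor(indices=[[0, 1], [1, 3]], values=[2.0, 5.0], dense_shape=[2, 4])
B = tf.ones([4, 3])
out = matmul_wrapper(A_sp, B, optype='sparse')  # dense tensor of shape [2, 3]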
Example #17
Source File: NGCF.py    From neural_graph_collaborative_filtering with MIT License
def _create_gcn_embed(self):
        A_fold_hat = self._split_A_hat(self.norm_adj)
        embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)


        all_embeddings = [embeddings]

        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))

            embeddings = tf.concat(temp_embed, 0)
            embeddings = tf.nn.leaky_relu(tf.matmul(embeddings, self.weights['W_gc_%d' %k]) + self.weights['b_gc_%d' %k])
            embeddings = tf.nn.dropout(embeddings, 1 - self.mess_dropout[k])

            all_embeddings += [embeddings]

        all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings 
Example #18
Source File: loss.py    From JAPE with MIT License
def sim_loss_sparse_with_kb12(ents_1, ents_2, cross_sim_mat, kb1_sim_mat, kb2_sim_mat):
    opt_vars = [v for v in tf.trainable_variables() if v.name.startswith("relation2vec")]
    trans_ents = tf.sparse_tensor_dense_matmul(cross_sim_mat, ents_2)
    trans_ents = tf.nn.l2_normalize(trans_ents, 1)
    base_loss = tf.reduce_sum(tf.reduce_sum(tf.pow(ents_1 - trans_ents, 2), 1))

    if inner_sim_param > 0.0:
        trans_kb1_ents = tf.sparse_tensor_dense_matmul(kb1_sim_mat, ents_1)
        trans_kb1_ents = tf.nn.l2_normalize(trans_kb1_ents, 1)
        base_loss += inner_sim_param * tf.reduce_sum(tf.reduce_sum(tf.pow(ents_1 - trans_kb1_ents, 2), 1))
        trans_kb2_ents = tf.sparse_tensor_dense_matmul(kb2_sim_mat, ents_2)
        trans_kb2_ents = tf.nn.l2_normalize(trans_kb2_ents, 1)
        base_loss += inner_sim_param * tf.reduce_sum(tf.reduce_sum(tf.pow(ents_2 - trans_kb2_ents, 2), 1))

    loss = sim_loss_param * base_loss
    optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss, var_list=opt_vars)
    return optimizer, loss 
Example #19
Source File: layers.py    From QGforQA with MIT License
def dot(x, y):
    """Modified from keras==2.0.6
    Multiplies 2 tensors (and/or variables) and returns a *tensor*.

    When attempting to multiply an nD tensor
    with another nD tensor, it reproduces the Theano behavior.
    (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)

    # Arguments
        x: Tensor or variable.
        y: Tensor or variable.

    # Returns
        A tensor, dot product of `x` and `y`.
    """
    if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
        x_shape = []
        for i, s in zip(x.get_shape().as_list(), tf.unstack(tf.shape(x))):
            if i is not None:
                x_shape.append(i)
            else:
                x_shape.append(s)
        x_shape = tuple(x_shape)
        y_shape = []
        for i, s in zip(y.get_shape().as_list(), tf.unstack(tf.shape(y))):
            if i is not None:
                y_shape.append(i)
            else:
                y_shape.append(s)
        y_shape = tuple(y_shape)
        y_permute_dim = list(range(ndim(y)))
        y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
        xt = tf.reshape(x, [-1, x_shape[-1]])
        yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
        return tf.reshape(tf.matmul(xt, yt),
                          x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
    if isinstance(x, tf.SparseTensor):
        out = tf.sparse_tensor_dense_matmul(x, y)
    else:
        out = tf.matmul(x, y)
    return out 
Example #20
Source File: nn_utils.py    From Transferable-E2E-ABSA with MIT License
def dense(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu, input_type='dense'):
    with tf.name_scope(layer_name):
        weight = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=1. / tf.sqrt(input_dim / 2.)), name='weight')
        bias = tf.Variable(tf.constant(0.1, shape=[output_dim]), name='bias')
        if input_type == 'sparse':
            activations = act(tf.sparse_tensor_dense_matmul(input_tensor, weight) + bias)
        else:
            activations = act(tf.matmul(input_tensor, weight) + bias)
        return activations 
Example #21
Source File: util.py    From pregel with MIT License
def get_dotproduct_op(sparse_features=True):
    if (sparse_features):
        return tf.sparse_tensor_dense_matmul
    else:
        return tf.matmul 
Example #22
Source File: tf_sparse.py    From mercari-solution with MIT License
def sparse_linear(xs, shape, name: str, actfunc=identity):
    assert len(shape) == 2
    w = tf.get_variable(name, initializer=tf.glorot_normal_initializer(),
                        shape=shape)
    bias = tf.Variable(tf.zeros(shape[1]))
    return actfunc(tf.sparse_tensor_dense_matmul(xs, w) + bias), w 
Example #23
Source File: layers.py    From NPHard with MIT License
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res 
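A hedged usage sketch of this wrapper in its typical GCN setting; the 3-node adjacency and features below are invented for illustration. Note that when sparse=True, x must be an actual tf.SparseTensor, not a dense tensor that merely contains many zeros.

# Hypothetical 3-node graph: sparse adjacency times dense node features.
adj = tf.SparseTensor(indices=[[0, 1], [1, 2], [2, 0]],
                      values=[1.0, 1.0, 1.0],
                      dense_shape=[3, 3])
feats = tf.ones([3, 8])
hidden = dot(adj, feats, sparse=True)  # dense tensor of shape [3, 8]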
Example #24
Source File: nn.py    From ASKCOS with Mozilla Public License 2.0
def sparse_linear(input_, input_size, output_size, scope, init_bias=0.0):
    with tf.variable_scope(scope):
        W = tf.get_variable("Matrix", [input_size, output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / math.sqrt(output_size)))
        b = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(init_bias))
    return tf.sparse_tensor_dense_matmul(input_, W) + b 
Example #25
Source File: kernels.py    From GGP with Apache License 2.0
def K(self, X, X2=None):
        X = tf.reshape(tf.cast(X, tf.int32), [-1])
        X2 = tf.reshape(tf.cast(X2, tf.int32), [-1]) if X2 is not None else X
        base_K_mat = (self.variance * tf.matmul(self.denseFeatureMat, self.denseFeatureMat, transpose_b = True) + self.offset) ** self.degree
        t1 = tf.sparse_tensor_dense_matmul(self.sparse_P, base_K_mat)
        t2 = tf.sparse_tensor_dense_matmul(self.sparse_P, t1, adjoint_b=True)
        return tf.gather(tf.gather(t2, X), X2, axis=1) 
Example #26
Source File: SparseFullyConnectedLayer.py    From NeuralResponseRanking with MIT License
def call(self, x, mask=None):
        #sys.stderr.write("sparse fully connected layer input data %s type:%s\n" % (x.name, K.type(x)))
        #sys.stderr.write("sparse fully connected layer weight type:%s\n" % (K.type(self.W)))
        print(str(K.ndim(x)))
        return self.activation(tf.sparse_tensor_dense_matmul(x, self.W) + self.b) 
Example #27
Source File: layers.py    From GPF with MIT License
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res 
Example #28
Source File: gcn_basis_concat.py    From RelationPrediction with MIT License
def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):
        mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))
        mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))

        collected_messages_f = tf.sparse_tensor_dense_matmul(mtr_f, forward_messages)
        collected_messages_b = tf.sparse_tensor_dense_matmul(mtr_b, backward_messages)

        updated_vertex_embeddings = collected_messages_f + collected_messages_b

        if self.use_nonlinearity:
            activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages)
        else:
            activated = updated_vertex_embeddings + self_loop_messages

        return activated 
Example #29
Source File: gcn.py    From neural-structured-learning with Apache License 2.0
def dot(x, y, sparse=False):
  """Wrapper for tf.matmul (sparse vs dense)."""
  if sparse:
    res = tf.sparse_tensor_dense_matmul(x, y)
  else:
    res = tf.matmul(x, y)
  return res 
Example #30
Source File: representation_graphs.py    From tensorrec with Apache License 2.0
def connect_representation_graph(self, tf_features, n_components, n_features, node_name_ending):

        # Weights are normalized before building the variable
        raw_weights = tf.random_normal([n_features, n_components], stddev=1.0)
        normalized_weights = tf.nn.l2_normalize(raw_weights, 1)

        # Create variable nodes
        tf_linear_weights = tf.Variable(normalized_weights, name='linear_weights_{}'.format(node_name_ending))
        tf_repr = tf.sparse_tensor_dense_matmul(tf_features, tf_linear_weights)

        # Return repr layer and variables
        return tf_repr, [tf_linear_weights]