Python torch.spmm() Examples

The following are 30 code examples of torch.spmm(), drawn from open-source projects; the source file, project, and license for each example are noted above it.
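For context, torch.spmm(mat1, mat2) multiplies a sparse matrix mat1 by a dense matrix mat2 and returns a dense tensor. A minimal self-contained sketch:

import torch

# Build a 2x2 sparse COO matrix with nonzeros at (0, 1) and (1, 0).
indices = torch.tensor([[0, 1], [1, 0]])   # first row: row indices, second row: column indices
values = torch.tensor([1.0, 1.0])
adj = torch.sparse_coo_tensor(indices, values, (2, 2))

features = torch.randn(2, 4)
out = torch.spmm(adj, features)            # dense result of shape (2, 4)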
Example #1
Source File: layers.py    From DropEdge with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)

        # Self-loop
        if self.self_weight is not None:
            output = output + torch.mm(input, self.self_weight)

        if self.bias is not None:
            output = output + self.bias
        # BN
        if self.bn is not None:
            output = self.bn(output)
        # Res
        if self.res:
            return self.sigma(output) + input
        else:
            return self.sigma(output) 
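This forward assumes the module defines weight, self_weight, bias, bn, res, and sigma elsewhere. A minimal constructor sketch consistent with it (class name, argument names, and initialization scheme are assumptions, not necessarily the DropEdge original):

import math
import torch
import torch.nn as nn

class GCNLayerSketch(nn.Module):
    """Hypothetical constructor matching the forward above; not the DropEdge original."""
    def __init__(self, in_features, out_features, activation=torch.relu,
                 withbn=True, withloop=True, bias=True, res=False):
        super().__init__()
        self.sigma = activation                  # nonlinearity applied in forward
        self.res = res                           # res=True assumes in_features == out_features
        self.weight = nn.Parameter(torch.empty(in_features, out_features))
        self.self_weight = nn.Parameter(torch.empty(in_features, out_features)) if withloop else None
        self.bias = nn.Parameter(torch.zeros(out_features)) if bias else None
        self.bn = nn.BatchNorm1d(out_features) if withbn else None
        stdv = 1.0 / math.sqrt(out_features)
        nn.init.uniform_(self.weight, -stdv, stdv)
        if self.self_weight is not None:
            nn.init.uniform_(self.self_weight, -stdv, stdv)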
Example #2
Source File: utils.py    From SGC with MIT License
def sgc_precompute(adj, features, degree, index_dict):
    assert degree == 1, "Only supporting degree 1 now"
    feat_dict = {}
    start = perf_counter()
    train_feats = features[:, index_dict["train"]].cuda()
    train_feats = torch.spmm(adj, train_feats).t()
    train_feats_max, _ = train_feats.max(dim=0, keepdim=True)
    train_feats_min, _ = train_feats.min(dim=0, keepdim=True)
    train_feats_range = train_feats_max-train_feats_min
    useful_features_dim = train_feats_range.squeeze().gt(0).nonzero().squeeze()
    train_feats = train_feats[:, useful_features_dim]
    train_feats_range = train_feats_range[:, useful_features_dim]
    train_feats_min = train_feats_min[:, useful_features_dim]
    train_feats = (train_feats-train_feats_min)/train_feats_range
    feat_dict["train"] = train_feats
    for phase in ["test", "val"]:
        feats = features[:, index_dict[phase]].cuda()
        feats = torch.spmm(adj, feats).t()
        feats = feats[:, useful_features_dim]
        feat_dict[phase] = ((feats-train_feats_min)/train_feats_range).cpu() # adj is symmetric!
    precompute_time = perf_counter()-start
    return feat_dict, precompute_time 
Example #3
Source File: train_pytorch_U2GNN_UnSup.py    From Graph-Transformer with Apache License 2.0
def evaluate():
    model.eval() # Turn on the evaluation mode
    with torch.no_grad():
        # evaluating
        node_embeddings = model.ss.weight
        graph_embeddings = torch.spmm(graph_pool, node_embeddings).data.cpu().numpy()
        acc_10folds = []
        for fold_idx in range(10):
            train_idx, test_idx = separate_data_idx(graphs, fold_idx)
            train_graph_embeddings = graph_embeddings[train_idx]
            test_graph_embeddings = graph_embeddings[test_idx]
            train_labels = graph_labels[train_idx]
            test_labels = graph_labels[test_idx]

            cls = LogisticRegression(solver="liblinear", tol=0.001)
            cls.fit(train_graph_embeddings, train_labels)
            ACC = cls.score(test_graph_embeddings, test_labels)
            acc_10folds.append(ACC)
            print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)

        mean_10folds = statistics.mean(acc_10folds)
        std_10folds = statistics.stdev(acc_10folds)
        # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))

    return mean_10folds, std_10folds 
Example #4
Source File: pytorch_U2GNN_Sup.py    From Graph-Transformer with Apache License 2.0
def forward(self, input_x, graph_pool, X_concat):
        prediction_scores = 0
        input_Tr = F.embedding(input_x, X_concat)
        for layer_idx in range(self.num_U2GNN_layers):
            #
            output_Tr = self.u2gnn_layers[layer_idx](input_Tr)
            output_Tr = torch.split(output_Tr, split_size_or_sections=1, dim=1)[0]
            output_Tr = torch.squeeze(output_Tr, dim=1)
            #new input for next layer
            input_Tr = F.embedding(input_x, output_Tr)
            #sum pooling
            graph_embeddings = torch.spmm(graph_pool, output_Tr)
            graph_embeddings = self.dropouts[layer_idx](graph_embeddings)
            # Produce the final scores
            prediction_scores += self.predictions[layer_idx](graph_embeddings)

        return prediction_scores 
Example #5
Source File: mesh.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def downsample(self, x, n1=0, n2=None):
        """Downsample mesh."""
        if n2 is None:
            n2 = self.num_downsampling
        if x.ndimension() < 3:
            for i in range(n1, n2):
                x = spmm(self._D[i], x)
        elif x.ndimension() == 3:
            out = []
            for i in range(x.shape[0]):
                y = x[i]
                for j in range(n1, n2):
                    y = spmm(self._D[j], y)
                out.append(y)
            x = torch.stack(out, dim=0)
        return x 
Example #6
Source File: pgd.py    From DeepRobust with MIT License
def prox_nuclear_truncated_2(self, data, alpha, k=50):
        import tensorly as tl
        tl.set_backend('pytorch')
        U, S, V = tl.truncated_svd(data.cpu(), n_eigenvecs=k)
        U, S, V = torch.FloatTensor(U).cuda(), torch.FloatTensor(S).cuda(), torch.FloatTensor(V).cuda()
        self.nuclear_norm = S.sum()
        # print("nuclear norm: %.4f" % self.nuclear_norm)

        S = torch.clamp(S-alpha, min=0)
        indices = torch.tensor([range(0, U.shape[0]), range(0, U.shape[0])]).cuda()
        values = S
        diag_S = torch.sparse.FloatTensor(indices, values, torch.Size(U.shape))
        # diag_S = torch.diag(torch.clamp(S-alpha, min=0))
        U = torch.spmm(U, diag_S)
        V = torch.matmul(U, V)
        return V 
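The torch.sparse.FloatTensor constructor used above is deprecated in recent PyTorch releases; the same diagonal construction can be written with torch.sparse_coo_tensor. A sketch (sparse_diag is a hypothetical helper, not part of DeepRobust):

import torch

def sparse_diag(values):
    # Build an (n, n) sparse COO matrix with `values` on the diagonal.
    n = values.shape[0]
    idx = torch.arange(n, device=values.device)
    return torch.sparse_coo_tensor(torch.stack([idx, idx]), values, (n, n))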
Example #7
Source File: r_gcn.py    From DeepRobust with MIT License
def forward(self, previous_miu, previous_sigma, adj_norm1=None, adj_norm2=None, gamma=1):

        if adj_norm1 is None and adj_norm2 is None:
            return torch.mm(previous_miu, self.weight_miu), \
                    torch.mm(previous_sigma, self.weight_sigma)

        Att = torch.exp(-gamma * previous_sigma)
        M = adj_norm1 @ (previous_miu * Att) @ self.weight_miu
        Sigma = adj_norm2 @ (previous_sigma * Att * Att) @ self.weight_sigma
        return M, Sigma

        # M = torch.mm(torch.mm(adj, previous_miu * A), self.weight_miu)
        # Sigma = torch.mm(torch.mm(adj, previous_sigma * A * A), self.weight_sigma)

        # TODO sparse implementation
        # support = torch.mm(input, self.weight)
        # output = torch.spmm(adj, support)
        # return output + self.bias 
Example #8
Source File: nipa_q_net_node.py    From DeepRobust with MIT License
def get_graph_embedding(self, adj):
        if self.node_features.data.is_sparse:
            node_embed = torch.spmm(self.node_features, self.w_n2l)
        else:
            node_embed = torch.mm(self.node_features, self.w_n2l)

        node_embed += self.bias_n2l

        input_message = node_embed
        node_embed = F.relu(input_message)

        for i in range(self.max_lv):
            n2npool = torch.spmm(adj, node_embed)
            node_linear = self.conv_params(n2npool)
            merged_linear = node_linear + input_message
            node_embed = F.relu(merged_linear)

        graph_embed = torch.mean(node_embed, dim=0, keepdim=True)
        return graph_embed, node_embed 
Example #9
Source File: mesh.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def upsample(self, x, n1=1, n2=0):
        """Upsample mesh."""
        if x.ndimension() < 3:
            for i in reversed(range(n2, n1)):
                x = spmm(self._U[i], x)
        elif x.ndimension() == 3:
            out = []
            for i in range(x.shape[0]):
                y = x[i]
                for j in reversed(range(n2, n1)):
                    y = spmm(self._U[j], y)
                out.append(y)
            x = torch.stack(out, dim=0)
        return x 
Example #10
Source File: utils.py    From learn-to-cluster with MIT License
def forward(self, features, A):
        if features.dim() == 2:
            x = torch.spmm(A, features)
        elif features.dim() == 3:
            x = torch.bmm(A, features)
        else:
            raise RuntimeError('the dimension of features should be 2 or 3')
        return x 
Example #11
Source File: dsgcn.py    From learn-to-cluster with MIT License
def forward(self, x, adj, D=None):
        if x.dim() == 3:
            xw = torch.matmul(x, self.weight)
            output = torch.bmm(adj, xw)
        elif x.dim() == 2:
            xw = torch.mm(x, self.weight)
            output = torch.spmm(adj, xw)
        if D is not None:
            output = output * 1. / D
        return output 
Example #12
Source File: mesh.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def ref_vertices(self):
        """Return the template vertices at the specified subsampling level."""
        ref_vertices = self._ref_vertices
        for i in range(self.num_downsampling):
            ref_vertices = torch.spmm(self._D[i], ref_vertices)
        return ref_vertices 
Example #13
Source File: binary_pair_net.py    From Visual-Template-Free-Form-Parsing with GNU General Public License v3.0
def forward(self, node_features, adjacencyMatrix, numBBs):
        # expects edge_features as batch currently
        # adj = torch.spmm(self.weight, edge_features) + self.bias
        return None, node_features
Example #14
Source File: dsgcn.py    From learn-to-cluster with MIT License
def extract(self, x, adj):
        adj.detach_()
        D = adj.sum(dim=2, keepdim=True)
        D.detach_()
        assert (D > 0).all(), "D should be larger than 0, otherwise the gradient will be NaN."
        for _ in range(self.degree):
            if x.dim() == 3:
                x = torch.bmm(adj, x) / D
            elif x.dim() == 2:
                x = torch.spmm(adj, x) / D
        x = self.pool(x)
        x = self.classifier(x)
        return x 
Example #15
Source File: layers.py    From graph-cnn.pytorch with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #16
Source File: graph_cnn_encoder.py    From AntNRE with Apache License 2.0
def get_output_dim(self) -> int:
        return self.hidden_dim

#  g = GCN(10, 10, 1)
#  inputs = torch.randn(2, 10)
#  i = torch.LongTensor([[0, 1],
#                        [1, 0]])
#  v = torch.FloatTensor([1, 1])
#  adj = torch.sparse.FloatTensor(i, v, torch.Size([2, 2]))
#  print(adj)
#  print(torch.spmm(adj, inputs))
#  print(adj.to_dense())
#  print(torch.spmm(adj.to_dense(), inputs))
#  print(inputs)
#  print(g(inputs, adj)) 
Example #17
Source File: graph_cnn_encoder.py    From AntNRE with Apache License 2.0
def forward(self,
                inputs: torch.FloatTensor,
                adj: torch.sparse.FloatTensor) -> torch.FloatTensor:
        support = torch.mm(inputs, self.weight)
        #  print(support)
        output = torch.spmm(adj, support)
        #  print(output)
        #  print("======")
        if self.bias is not None:
            return output + self.bias
        return output 
Example #18
Source File: layers.py    From graph-tutorial.pytorch with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #19
Source File: models.py    From SGC with MIT License
def forward(self, input, adj):
        support = self.W(input)
        output = torch.spmm(adj, support)
        return output
Example #20
Source File: utils.py    From SGC with MIT License
def sgc_precompute(features, adj, degree):
    t = perf_counter()
    for i in range(degree):
        features = torch.spmm(adj, features)
    precompute_time = perf_counter()-t
    return features, precompute_time 
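A hypothetical call site, assuming adj is an (N, N) row-normalized sparse adjacency matrix and features is a dense (N, F) feature matrix:

# Hypothetical usage of the precompute step above.
features, precompute_time = sgc_precompute(features, adj, degree=2)
print("precompute time: {:.4f}s".format(precompute_time))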
Example #21
Source File: layers.py    From gae-pytorch with MIT License
def forward(self, input, adj):
        input = F.dropout(input, self.dropout, self.training)
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        output = self.act(output)
        return output 
Example #22
Source File: mixhop_layer.py    From cogdl with MIT License
def adj_pow_x(self, x, adj, p):
        for _ in range(p):
            x = torch.spmm(adj, x)
        return x 
Example #23
Source File: gin.py    From cogdl with MIT License
def forward(self, x, edge_index, edge_weight=None):
        edge_index, _ = remove_self_loops(edge_index)
        edge_weight = torch.ones(edge_index.shape[1]) if edge_weight is None else edge_weight
        adj = torch.sparse_coo_tensor(edge_index, edge_weight, (x.shape[0], x.shape[0]))
        adj = adj.to(x.device)
        out = (1 + self.eps) * x + torch.spmm(adj, x)
        if self.apply_func is not None:
            out = self.apply_func(out)
        return out 
Example #24
Source File: fastgcn.py    From cogdl with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #25
Source File: gcn.py    From cogdl with MIT License
def forward(self, input, edge_index):
        adj = torch.sparse_coo_tensor(
            edge_index,
            torch.ones(edge_index.shape[1]).float(),
            (input.shape[0], input.shape[0]),
        ).to(input.device)
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #26
Source File: asgcn.py    From cogdl with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #27
Source File: layers.py    From ilf with Apache License 2.0
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #28
Source File: graphnet.py    From FEAT with MIT License
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example #29
Source File: pgd.py    From DeepRobust with MIT License
def prox_nuclear_cuda(self, data, alpha):

        U, S, V = torch.svd(data)
        # self.nuclear_norm = S.sum()
        # print(f"rank = {len(S.nonzero())}")
        S = torch.clamp(S-alpha, min=0)
        indices = torch.tensor([range(0, U.shape[0]), range(0, U.shape[0])]).cuda()
        values = S
        diag_S = torch.sparse.FloatTensor(indices, values, torch.Size(U.shape))
        # diag_S = torch.diag(torch.clamp(S-alpha, min=0))
        # print(f"rank_after = {len(diag_S.nonzero())}")
        V = torch.spmm(diag_S, V.t_())
        V = torch.matmul(U, V)
        return V 
Example #30
Source File: mettack.py    From DeepRobust with MIT License
def inner_train(self, features, modified_adj, idx_train, idx_unlabeled, labels, labels_self_training):
        adj_norm = utils.normalize_adj_tensor(modified_adj)

        for j in range(self.train_iters):
            hidden = features
            for w, b in zip(self.weights, self.biases):
                if self.sparse_features:
                    hidden = adj_norm @ torch.spmm(hidden, w) + b
                else:
                    hidden = adj_norm @ hidden @ w + b
                if self.with_relu:
                    hidden = F.relu(hidden)

            output = F.log_softmax(hidden, dim=1)
            loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
            loss_unlabeled = F.nll_loss(output[idx_unlabeled], labels_self_training[idx_unlabeled])

            if self.lambda_ == 1:
                attack_loss = loss_labeled
            elif self.lambda_ == 0:
                attack_loss = loss_unlabeled
            else:
                attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled

            self.optimizer.zero_grad()
            loss_labeled.backward(retain_graph=True)
            self.optimizer.step()

            if self.attack_structure:
                self.adj_changes.grad.zero_()
                self.adj_grad_sum += torch.autograd.grad(attack_loss, self.adj_changes, retain_graph=True)[0]
            if self.attack_features:
                self.feature_changes.grad.zero_()
                self.feature_grad_sum += torch.autograd.grad(attack_loss, self.feature_changes, retain_graph=True)[0]

        loss_test_val = F.nll_loss(output[idx_unlabeled], labels[idx_unlabeled])
        print('GCN loss on unlabeled data: {}'.format(loss_test_val.item()))
        print('GCN acc on unlabeled data: {}'.format(utils.accuracy(output[idx_unlabeled], labels[idx_unlabeled]).item()))