Python networkx.readwrite.json_graph.node_link_graph() Examples

The following are 10 code examples of networkx.readwrite.json_graph.node_link_graph(), taken from open-source projects. You can go to the original project or source file by following the links above each example, and you may also want to check out the other available functions and classes of the networkx.readwrite.json_graph module.
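Before the project examples, here is a minimal round-trip sketch: node_link_data() turns a graph into a JSON-serializable dict, and node_link_graph() rebuilds the graph from that dict (the small graph below is illustrative).

import json
import networkx as nx
from networkx.readwrite import json_graph

G = nx.path_graph(4)                     # small example graph
data = json_graph.node_link_data(G)      # dict with 'nodes' and 'links' keys
s = json.dumps(data)                     # JSON-serializable, so it can be written to disk
H = json_graph.node_link_graph(json.loads(s))
assert nx.is_isomorphic(G, H)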
Example #1
Source File: smoothness.py    From CS-GNN with MIT License
def compute_feature_smoothness(path, times=0):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    feats = np.load(path+'-feats.npy')
    # smooth
    for i in range(times):
        feats = feature_broadcast(feats, G_org)
    np.save(path+'-feats_'+str(times)+'.npy', feats)

    min_max_scaler = preprocessing.MinMaxScaler()
    feats = min_max_scaler.fit_transform(feats)
    smoothness = np.zeros(feats.shape[1])
    for src, dst in G_org.edges():
        smoothness += (feats[src]-feats[dst])*(feats[src]-feats[dst])
    smoothness = np.linalg.norm(smoothness,ord=1)
    print('The smoothness is: ', 2*smoothness/edge_num/feats.shape[1]) 
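The function above takes a file prefix and expects <path>-G.json and <path>-feats.npy to exist, with node ids in the JSON acting as row indices into the feature matrix so that feats[src] works directly. A hypothetical call (the data directory and prefix are assumptions):

# assumes ./data/ppi-G.json and ./data/ppi-feats.npy exist in this layout
compute_feature_smoothness('./data/ppi', times=0)  # writes ./data/ppi-feats_0.npy and prints the smoothness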
Example #2
Source File: smoothness.py    From CS-GNN with MIT License
def compute_label_smoothness(path, rate=0.):
    G_org = json_graph.node_link_graph(json.load(open(path+'-G.json')))
    # G_org = remove_unlabeled(G_org)
    if nx.is_directed(G_org):
        G_org = G_org.to_undirected()
    class_map = json.load(open(path+'-class_map.json'))
    for k, v in class_map.items():
        if type(v) != list:
            class_map = convert_list(class_map)
        break
    labels = convert_ndarray(class_map)
    labels = np.squeeze(label_to_vector(labels))

    # smooth
    G_org = label_broadcast(G_org, labels, rate)
    with open(path+'-G_'+str(rate)+'.json', 'w') as f:
        f.write(json.dumps(json_graph.node_link_data(G_org)))

    edge_num = G_org.number_of_edges()
    G = pygsp.graphs.Graph(nx.adjacency_matrix(G_org))
    smoothness = 0
    for src, dst in G_org.edges():
        if labels[src] != labels[dst]:
            smoothness += 1
    print('The smoothness is: ', 2*smoothness/edge_num) 
Example #3
Source File: topology.py    From vitrage with Apache License 2.0
def as_tree(graph, root=OPENSTACK_CLUSTER, reverse=False):
        if nx.__version__ >= '2.0':
            linked_graph = json_graph.node_link_graph(
                graph, attrs={'name': 'graph_index'})
        else:
            linked_graph = json_graph.node_link_graph(graph)
        if 0 == nx.number_of_nodes(linked_graph):
            return {}
        if reverse:
            linked_graph = linked_graph.reverse()
        if nx.__version__ >= '2.0':
            return json_graph.tree_data(
                linked_graph,
                root=root,
                attrs={'id': 'graph_index', 'children': 'children'})
        else:
            return json_graph.tree_data(linked_graph, root=root) 
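The attrs argument used above remaps the default JSON keys when reading the graph (newer networkx releases deprecate attrs in favour of explicit keyword arguments). A minimal sketch, assuming networkx 2.x and a node-name key of 'graph_index' instead of the default 'id'; the node names are illustrative:

import networkx as nx
from networkx.readwrite import json_graph

attrs = {'source': 'source', 'target': 'target',
         'name': 'graph_index', 'key': 'key', 'link': 'links'}
G = nx.DiGraph([('openstack.cluster', 'nova.host')])  # illustrative nodes
data = json_graph.node_link_data(G, attrs=attrs)       # node ids stored under 'graph_index'
H = json_graph.node_link_graph(data, attrs=attrs)      # read back with the same key mapping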
Example #4
Source File: transformer.py    From kgx with BSD 3-Clause "New" or "Revised" License
def deserialize(data: Dict) -> nx.MultiDiGraph:
        """
        Deserialize a networkx.MultiDiGraph from a dictionary.

        Parameters
        ----------
        data: dict
            Dictionary containing nodes and edges

        Returns
        -------
        networkx.MultiDiGraph
            A networkx.MultiDiGraph representation

        """
        g = json_graph.node_link_graph(data)
        return g 
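Because node_link_data() records the directed and multigraph flags in the dict, deserialize() above gets a MultiDiGraph back without any extra arguments. A minimal round-trip sketch (the edge and its attributes are made up for illustration):

import networkx as nx
from networkx.readwrite import json_graph

g = nx.MultiDiGraph()
g.add_edge('gene:A', 'disease:B', predicate='contributes_to')  # hypothetical edge
data = json_graph.node_link_data(g)    # includes 'directed': True and 'multigraph': True
g2 = json_graph.node_link_graph(data)  # rebuilt as a MultiDiGraph
assert isinstance(g2, nx.MultiDiGraph)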
Example #5
Source File: node_link_data.py    From EDeN with MIT License
def _node_link_data_to_eden(serialized_list):
    """Takes a string list in the serialised node_link_data JSON format and yields networkx graphs."""
    for serial_data in serialized_list:
        py_obj = json.loads(serial_data)
        graph = json_graph.node_link_graph(py_obj)
        yield graph 
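A hypothetical way to produce the serialized_list that the generator above consumes: dump each graph to a node_link_data dict and JSON-encode it as one string per graph.

import json
import networkx as nx
from networkx.readwrite import json_graph

graphs = [nx.path_graph(3), nx.cycle_graph(4)]
serialized_list = [json.dumps(json_graph.node_link_data(g)) for g in graphs]
for graph in _node_link_data_to_eden(serialized_list):
    print(graph.number_of_nodes())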
Example #6
Source File: ppi.py    From dgl with Apache License 2.0
def _load(self):
        """Loads input data.

        train/test/valid_graph.json => the graph data used for training,
          test and validation as json format;
        train/test/valid_feats.npy => the feature vectors of nodes as
          numpy.ndarry object, it's shape is [n, v],
          n is the number of nodes, v is the feature's dimension;
        train/test/valid_labels.npy=> the labels of the input nodes, it
          is a numpy ndarry, it's like[[0, 0, 1, ... 0], 
          [0, 1, 1, 0 ...1]], shape of it is n*h, n is the number of nodes,
          h is the label's dimension;
        train/test/valid/_graph_id.npy => the element in it indicates which
          graph the nodes belong to, it is a one dimensional numpy.ndarray
          object and the length of it is equal the number of nodes,
          it's like [1, 1, 2, 1...20]. 
        """
        print('Loading G...')
        if self.mode == 'train':
            with open('{}/ppi/train_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/train_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/train_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/train_graph_id.npy'.format(self._dir))
        if self.mode == 'valid':
            with open('{}/ppi/valid_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/valid_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/valid_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/valid_graph_id.npy'.format(self._dir))
        if self.mode == 'test':
            with open('{}/ppi/test_graph.json'.format(self._dir)) as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load('{}/ppi/test_labels.npy'.format(self._dir))
            self.features = np.load('{}/ppi/test_feats.npy'.format(self._dir))
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load('{}/ppi/test_graph_id.npy'.format(self._dir)) 
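The *_graph.json files described in the docstring are plain node-link dumps, so a file in that format can be produced with node_link_data(); a minimal sketch (the file name and graph content are illustrative):

import json
import networkx as nx
from networkx.readwrite import json_graph

g = nx.Graph()
g.add_edges_from([(0, 1), (1, 2), (2, 3)])
with open('train_graph.json', 'w') as f:
    json.dump(json_graph.node_link_data(g), f)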
Example #7
Source File: utils.py    From CS-GNN with MIT License
def loadG(x, d):
    # `d` is forwarded as the `directed` flag of node_link_graph
    return json_graph.node_link_graph(json.load(open(x+'-G.json')), d) 
Example #8
Source File: ppi.py    From pytorch_geometric with MIT License
def process(self):
        for s, split in enumerate(['train', 'valid', 'test']):
            path = osp.join(self.raw_dir, '{}_graph.json').format(split)
            with open(path, 'r') as f:
                G = nx.DiGraph(json_graph.node_link_graph(json.load(f)))

            x = np.load(osp.join(self.raw_dir, '{}_feats.npy').format(split))
            x = torch.from_numpy(x).to(torch.float)

            y = np.load(osp.join(self.raw_dir, '{}_labels.npy').format(split))
            y = torch.from_numpy(y).to(torch.float)

            data_list = []
            path = osp.join(self.raw_dir, '{}_graph_id.npy').format(split)
            idx = torch.from_numpy(np.load(path)).to(torch.long)
            idx = idx - idx.min()

            for i in range(idx.max().item() + 1):
                mask = idx == i

                G_s = G.subgraph(mask.nonzero().view(-1).tolist())
                edge_index = torch.tensor(list(G_s.edges)).t().contiguous()
                edge_index = edge_index - edge_index.min()
                edge_index, _ = remove_self_loops(edge_index)

                data = Data(edge_index=edge_index, x=x[mask], y=y[mask])

                if self.pre_filter is not None and not self.pre_filter(data):
                    continue

                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                data_list.append(data)
            torch.save(self.collate(data_list), self.processed_paths[s]) 
Example #9
Source File: Spreadsheet.py    From koala with GNU General Public License v3.0
def from_dict(input_data):

        data = dict(input_data)

        nodes = list(
            map(Cell.from_dict,
                filter(
                    lambda item: not isinstance(item['value'], dict),
                    data['nodes'])))
        cellmap = {n.address(): n for n in nodes}

        def cell_from_dict(d):
            return Cell.from_dict(d, cellmap=cellmap)

        nodes.extend(
            list(
                map(cell_from_dict,
                    filter(
                        lambda item: isinstance(item['value'], dict),
                        data['nodes']))))

        data["nodes"] = [{'id': node} for node in nodes]

        links = []
        idmap = { node.address(): node for node in nodes }
        for el in data['links']:
            source_address = el['source']
            target_address = el['target']
            link = {
                'source': idmap[source_address],
                'target': idmap[target_address],
            }
            links.append(link)

        data['links'] = links

        G = json_graph.node_link_graph(data)
        cellmap = {n.address(): n for n in G.nodes()}

        named_ranges = data["named_ranges"]
        inputs = data["inputs"]
        outputs = data["outputs"]

        spreadsheet = Spreadsheet()
        spreadsheet.build_spreadsheet(
            G, cellmap, named_ranges,
            inputs=inputs, outputs=outputs)
        return spreadsheet 
Example #10
Source File: utils.py    From DGFraud with Apache License 2.0
def load_data_ori(prefix, normalize=True, load_walks=False):
    G_data = json.load(open(prefix + "-G.json"))
    G = json_graph.node_link_graph(G_data)
    if isinstance(G.nodes()[0], int):
        conversion = lambda n : int(n)
    else:
        conversion = lambda n : n

    if os.path.exists(prefix + "-feats.npy"):
        feats = np.load(prefix + "-feats.npy")
    else:
        print("No features present.. Only identity features will be used.")
        feats = None
    id_map = json.load(open(prefix + "-id_map.json"))
    id_map = {conversion(k):int(v) for k,v in id_map.items()}
    walks = []
    class_map = json.load(open(prefix + "-class_map.json"))
    if isinstance(list(class_map.values())[0], list):
        lab_conversion = lambda n : n
    else:
        lab_conversion = lambda n : int(n)

    class_map = {conversion(k):lab_conversion(v) for k,v in class_map.items()}

    ## Remove all nodes that do not have val/test annotations
    ## (necessary because of networkx weirdness with the Reddit data)
    broken_count = 0
    for node in G.nodes():
        if not 'val' in G.node[node] or not 'test' in G.node[node]:
            G.remove_node(node)
            broken_count += 1
    print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))

    ## Make sure the graph has edge train_removed annotations
    ## (some datasets might already have this..)
    print("Loaded data.. now preprocessing..")
    for edge in G.edges():
        if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or
            G.node[edge[0]]['test'] or G.node[edge[1]]['test']):
            G[edge[0]][edge[1]]['train_removed'] = True
        else:
            G[edge[0]][edge[1]]['train_removed'] = False

    if normalize and feats is not None:
        from sklearn.preprocessing import StandardScaler
        train_ids = np.array([id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])
        train_feats = feats[train_ids]
        scaler = StandardScaler()
        scaler.fit(train_feats)
        feats = scaler.transform(feats)
    
    if load_walks:
        with open(prefix + "-walks.txt") as fp:
            for line in fp:
                walks.append(list(map(conversion, line.split())))

    return G, feats, id_map, walks, class_map
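A hypothetical call to the loader above; the prefix points at GraphSAGE-style files such as <prefix>-G.json, <prefix>-feats.npy, <prefix>-id_map.json and <prefix>-class_map.json (the path below is an assumption):

G, feats, id_map, walks, class_map = load_data_ori('./data/toy_ppi', load_walks=False)
print(G.number_of_nodes(), None if feats is None else feats.shape)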