Python networkx.readwrite.json_graph.node_link_graph() Examples

The following code examples show how to use networkx.readwrite.json_graph.node_link_graph(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down the ones you don't.

Example 1
Project: python-vitrageclient   Author: openstack   File: formatters.py    Apache License 2.0 6 votes vote down vote up
def emit_one(self, column_names, data, stdout, _=None):
        """Convert one row of vitrage data into a graph and emit it.

        :param column_names: field names, parallel to ``data``
        :param data: field values for a single row
        :param stdout: stream the formatted graph is written to
        """
        data = {n: i for n, i in zip(column_names, data)}

        # vitrage properties are not standard
        # to convert with networkx we need to
        # use the standard properties
        # some converters have issues with multigraph
        # so disable it (currently we don't have real multigraphs)
        self._reformat(data)

        # Compare the major version numerically: the original string
        # comparison (nx.__version__ >= '2.0') is lexicographic and
        # misorders releases such as '10.0' < '2.0'.
        if int(nx.__version__.split('.')[0]) >= 2:
            graph = json_graph.node_link_graph(
                data, attrs={'name': 'graph_index'})
        else:
            graph = json_graph.node_link_graph(data)

        self._write_format(graph, stdout)
Example 2
Project: catpy   Author: catmaid   File: export.py    MIT License 6 votes vote down vote up
def get_networkx(self, *skeleton_ids):
        """Fetch the given skeletons and build a ``networkx.MultiDiGraph``.

        Parameters
        ----------
        skeleton_ids : array-like of (int or str)
            Skeletons to include in the graph.

        Returns
        -------
        networkx.MultiDiGraph
        """
        nodelink = self.get_networkx_dict(*skeleton_ids)
        # networkx >= 2 changed the node-link attribute names, so the
        # raw dict has to be converted before deserialization.
        if NX_VERSION_INFO >= (2, 0):
            nodelink = convert_nodelink_data(nodelink)
        return json_graph.node_link_graph(nodelink, directed=True)
Example 3
Project: GPS   Author: golsun   File: plot_flux_graph.py    MIT License 6 votes vote down vote up
def get_edges(fld, traced='C'):
	"""Integrate per-edge flux of the traced-element graphs over time.

	:param fld: folder containing ``raw.npz`` and a ``graph`` subfolder
	:param traced: traced element symbol used in the graph file names
	:return: dict mapping (source, target) edge tuples to time-integrated flux
	"""
	raw = load_raw(os.path.join(fld, 'raw.npz'))
	tt = raw['axis0']

	integral = dict()
	for i_pnt in range(1, len(tt)):
		# print() call instead of the Python-2-only print statement
		print('reading point ' + str(i_pnt))

		path_graph = os.path.join(fld, 'graph', traced + '_' + str(i_pnt) + '.json')
		# context manager so the JSON file handle is closed promptly
		with open(path_graph, 'r') as f:
			flux_graph = json_graph.node_link_graph(json.load(f))
		for edge in flux_graph.edges():
			flux = flux_graph[edge[0]][edge[1]]['flux']
			# rectangle rule: accumulate flux * dt per edge
			integral[edge] = integral.get(edge, 0.) + flux * (tt[i_pnt] - tt[i_pnt - 1])

	return integral
Example 4
Project: JARVIS   Author: aaiijmrtt   File: tester.py    MIT License 6 votes vote down vote up
def testLabeler():
	"""Smoke-test the labeler/grapher pipeline on a fixed parse-graph pair."""
	from networkx.readwrite import json_graph
	import labeler, grapher
	sourcesentence = 'This sentence is a test.'
	# two dependency graphs differing only in one determiner ('a' vs 'another')
	sourcegraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'id': 0}, {'token': 'This', 'id': 1}, {'token': 'sentence', 'id': 2}, {'token': 'is', 'id': 3}, {'token': 'a', 'id': 4}, {'token': 'test', 'id': 5}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})
	targetgraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'id': 0}, {'token': 'This', 'id': 1}, {'token': 'sentence', 'id': 2}, {'token': 'is', 'id': 3}, {'token': 'another', 'id': 4}, {'token': 'test', 'id': 5}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})
	annotations = [(1, 5, 13, 'subject'), (1, 14, 16, 'verb'), (1, 19, 23, 'object')]
	alignment = [None, 1, 2, 3, 4, 5]
	realignment = [None, 1, 2, 3, None, 4]
	parsedlist = [['ROOT', ['S', ['NP', ['DT', 'This'], ['NN', 'sentence']], ['VP', ['VBZ', 'is'], ['NP', ['DT', 'a'], ['NN', 'test']]], ['.', '.']]]]

	labeler.labelgraph(sourcegraph, sourcesentence, annotations)
	labeler.labelalign(sourcegraph, targetgraph, alignment)
	grapher.subgraphplot(sourcegraph, label = True)
	grapher.subgraphplot(targetgraph, label = True)
	labeler.labellist(parsedlist[0], targetgraph, realignment)
	labeler.trimlist(parsedlist[0])
	labeler.relabellist(parsedlist[0], targetgraph)
	# print() call instead of the Python-2-only print statement
	print(parsedlist)
Example 5
Project: GraphZoom   Author: GraphZoom-iclr   File: scoring.py    MIT License 6 votes vote down vote up
def lr(dataset_dir, data_dir, dataset):
    """Evaluate node embeddings by logistic regression on the dataset splits.

    :param dataset_dir: directory holding ``{dataset}-G.json`` and
        ``{dataset}-class_map.json``
    :param data_dir: path of the ``.npy`` embedding matrix, indexed by node id
    :param dataset: dataset name prefix
    """
    print("%%%%%% Starting Evaluation %%%%%%")
    print("Loading data...")
    # context managers so the JSON file handles are closed promptly
    with open(dataset_dir + "/{}-G.json".format(dataset)) as f:
        G = json_graph.node_link_graph(json.load(f))
    with open(dataset_dir + "/{}-class_map.json".format(dataset)) as f:
        labels = json.load(f)

    # NOTE(review): G.node was removed in networkx 2.4; this assumes an
    # older networkx — confirm against the project's pinned version.
    train_ids = [n for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']]
    test_ids = [n for n in G.nodes() if G.node[n]['test']]
    test_ids = test_ids[:1000]
    train_labels = [labels[str(i)] for i in train_ids]
    test_labels = [labels[str(i)] for i in test_ids]

    embeds = np.load(data_dir)
    # fancy-index with the id lists directly; the original comprehensions
    # ([id for id in train_ids]) were no-op copies
    train_embeds = embeds[train_ids]
    test_embeds = embeds[test_ids]
    print("Running regression..")
    run_regression(train_embeds, train_labels, test_embeds, test_labels)
Example 6
Project: iok   Author: rustielin   File: iok.py    MIT License 6 votes vote down vote up
def write_to_file(self, filename=AWESOME_FILE):
        """Write the rendered awesome-list (``self.build_str()``) to *filename*.

        :param filename: output path; defaults to ``AWESOME_FILE``.
        """
        with open(filename, 'w') as f:
            f.write(self.build_str())

    # def read_from_file(self, filename=AWESOME_FILE):
    #     """Reads graph from JSON file in data link format"""
    #     with open(filename, 'r') as f:
    #         dat = json.load(f)
    #     self.graph = json_graph.node_link_graph(dat)

    # def write_to_file(self, filename=AWESOME_FILE):
    #     """Writes awesome-list"""
    #     # XXX: random to avoid collision for now, until read impl
    #     filename += random_string(5)
    #     with open(filename, 'w') as f:
    #         json.dump(data, f) 
Example 7
Project: vitrage   Author: openstack   File: topology.py    Apache License 2.0 6 votes vote down vote up
def as_tree(graph, root=OPENSTACK_CLUSTER, reverse=False):
        """Convert a node-link graph dict into nested tree JSON rooted at *root*.

        :param graph: node-link formatted graph dictionary
        :param root: node to use as the tree root
        :param reverse: if True, reverse all edge directions first
        :return: nested tree data dict, or {} when the graph is empty
        """
        # Compare the major version numerically: the original string
        # comparison (nx.__version__ >= '2.0') is lexicographic and
        # misorders releases such as '10.0' < '2.0'.
        is_nx2 = int(nx.__version__.split('.')[0]) >= 2
        if is_nx2:
            linked_graph = json_graph.node_link_graph(
                graph, attrs={'name': 'graph_index'})
        else:
            linked_graph = json_graph.node_link_graph(graph)
        if 0 == nx.number_of_nodes(linked_graph):
            return {}
        if reverse:
            linked_graph = linked_graph.reverse()
        if is_nx2:
            return json_graph.tree_data(
                linked_graph,
                root=root,
                attrs={'id': 'graph_index', 'children': 'children'})
        else:
            return json_graph.tree_data(linked_graph, root=root)
Example 8
Project: kgx   Author: NCATS-Tangerine   File: transformer.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def restore(data: Dict) -> nx.MultiDiGraph:
        """
        Rebuild a ``networkx.MultiDiGraph`` from its dictionary form.

        Parameters
        ----------
        data: dict
            Node-link dictionary containing nodes and edges

        Returns
        -------
        networkx.MultiDiGraph
            The deserialized graph

        """
        return json_graph.node_link_graph(data)
Example 9
Project: pytorch_geometric   Author: rusty1s   File: ppi.py    MIT License 5 votes vote down vote up
def process(self):
        """Build one ``Data`` object per graph id for each PPI split and save it.

        For every split (train/valid/test) this reads the node-link JSON
        graph, the node feature and label arrays, slices them per graph id,
        and collates the resulting list into ``self.processed_paths[s]``.
        """
        for s, split in enumerate(['train', 'valid', 'test']):
            # note: .format is applied to the already-joined path template
            path = osp.join(self.raw_dir, '{}_graph.json').format(split)
            with open(path, 'r') as f:
                G = nx.DiGraph(json_graph.node_link_graph(json.load(f)))

            x = np.load(osp.join(self.raw_dir, '{}_feats.npy').format(split))
            x = torch.from_numpy(x).to(torch.float)

            y = np.load(osp.join(self.raw_dir, '{}_labels.npy').format(split))
            y = torch.from_numpy(y).to(torch.float)

            data_list = []
            path = osp.join(self.raw_dir, '{}_graph_id.npy').format(split)
            idx = torch.from_numpy(np.load(path)).to(torch.long)
            # re-base graph ids so they start at zero
            idx = idx - idx.min()

            for i in range(idx.max().item() + 1):
                # boolean mask of the nodes that belong to graph i
                mask = idx == i

                # induced subgraph on those nodes
                G_s = G.subgraph(mask.nonzero().view(-1).tolist())
                edge_index = torch.tensor(list(G_s.edges)).t().contiguous()
                # re-base node indices within this subgraph
                edge_index = edge_index - edge_index.min()
                edge_index, _ = remove_self_loops(edge_index)

                data = Data(edge_index=edge_index, x=x[mask], y=y[mask])

                if self.pre_filter is not None and not self.pre_filter(data):
                    continue

                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                data_list.append(data)
            torch.save(self.collate(data_list), self.processed_paths[s]) 
Example 10
Project: pygraphs   Author: vlivashkin   File: test_avrachenkov.py    MIT License 5 votes vote down vote up
def setUpClass(cls):
        """Load the sample graphs once and cache (adjacency, communities) pairs."""
        sizes = np.array([100, 100])
        folder = os.path.dirname(os.path.abspath(__file__))
        with open(pj(folder, "sample_graphs/Graphs_g100_100x100.json"), "r") as fp:
            DATA = json.load(fp)
        R_COMMS = cls.real_comms(sizes)
        GS = [jg.node_link_graph(d) for d in DATA["GS"]]
        # a single np.array() is enough: the original wrapped it twice,
        # which just copied the freshly created array a second time
        GS = [(np.array(nx.adjacency_matrix(g).todense()), R_COMMS) for g in GS]
        cls.graphs = GS
Example 11
Project: soil   Author: gsi-upm   File: environment.py    Apache License 2.0 5 votes vote down vote up
def __setstate__(self, state):
        """Restore a pickled environment from its *state* dictionary."""
        # copy the plain configuration properties verbatim
        for config_key in _CONFIG_PROPS:
            self.__dict__[config_key] = state[config_key]
        self._env_agents = state['environment_agents']
        # the topology is stored in node-link JSON form; rebuild the graph
        self.G = json_graph.node_link_graph(state['G'])
        self._history = state['history']
Example 12
Project: soil   Author: gsi-upm   File: simulation.py    Apache License 2.0 5 votes vote down vote up
def __setstate__(self, state):
        """Restore a pickled Simulation, rehydrating derived attributes."""
        self.__dict__ = state
        # older pickles may predate load_module; default it to None
        self.load_module = getattr(self, 'load_module', None)
        if self.dir_path not in sys.path:
            sys.path += [self.dir_path, os.getcwd()]
        # topology is stored as node-link JSON; rebuild the graph object
        self.topology = json_graph.node_link_graph(state['topology'])
        self.network_agents = agents.calculate_distribution(agents._convert_agent_types(self.network_agents))
        self.environment_agents = agents._convert_agent_types(self.environment_agents,
                                                              known_modules=[self.load_module])
        self.environment_class = serialization.deserialize(self.environment_class,
                                                   known_modules=[self.load_module, 'soil.environment', ])  # func, name
        # NOTE(review): pickle ignores __setstate__'s return value,
        # so returning state here has no effect.
        return state 
Example 13
Project: catpy   Author: catmaid   File: test_export.py    MIT License 5 votes vote down vote up
def expected_graph():
    """Load the node-link fixture matching the installed networkx major version."""
    suffix = '1-11' if nx_version < (2, 0) else '2-2'
    fixture_path = os.path.join(FIXTURE_ROOT, "nodelink-nx{}.json".format(suffix))
    with open(fixture_path) as f:
        payload = json.load(f)
    return json_graph.node_link_graph(payload)
Example 14
Project: catpy   Author: catmaid   File: test_export.py    MIT License 5 votes vote down vote up
def test_nx2_convert_nowarn(nodelink_json, expected_graph):
    """Converting nx2-style node-link data should emit no warnings."""
    import warnings

    # pytest.warns(None) was deprecated in pytest 6.2 and removed in 7.0;
    # record warnings with the standard library instead.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        d = convert_nodelink_data(nodelink_json)
    assert len(record) == 0
    assert_same_graph(json_graph.node_link_graph(d), expected_graph)
Example 15
Project: catpy   Author: catmaid   File: test_export.py    MIT License 5 votes vote down vote up
def test_nx1_convert_warn(nodelink_json):
    """Deserializing converted data under networkx 1 should raise."""
    # with pytest.warns(UserWarning):  # this assertion fails even though the warning is raised!
    converted = convert_nodelink_data(nodelink_json)

    with pytest.raises(Exception):
        json_graph.node_link_graph(converted)
Example 16
Project: disparity_filter   Author: DerwenAI   File: disparity.py    MIT License 5 votes vote down vote up
def load_graph (graph_path):
    """
    read a node-link JSON file and return the directed graph it encodes
    """
    with open(graph_path) as f:
        payload = json.load(f)
    return json_graph.node_link_graph(payload, directed=True)
Example 17
Project: JARVIS   Author: aaiijmrtt   File: tester.py    MIT License 5 votes vote down vote up
def testAligner():
	"""Smoke-test aligner.align/score on two near-identical dependency graphs."""
	from networkx.readwrite import json_graph
	import aligner
	# two parse graphs differing only in one determiner ('a' vs 'another')
	sourcegraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'id': 0}, {'token': 'This', 'id': 1}, {'token': 'sentence', 'id': 2}, {'token': 'is', 'id': 3}, {'token': 'a', 'id': 4}, {'token': 'test', 'id': 5}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})
	targetgraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'id': 0}, {'token': 'This', 'id': 1}, {'token': 'sentence', 'id': 2}, {'token': 'is', 'id': 3}, {'token': 'another', 'id': 4}, {'token': 'test', 'id': 5}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})

	score, alignment = aligner.align(sourcegraph, targetgraph)
	aligner.score(sourcegraph, targetgraph, alignment)
Example 18
Project: JARVIS   Author: aaiijmrtt   File: tester.py    MIT License 5 votes vote down vote up
def testModeler():
	"""Smoke-test the modeler pipeline (connector/pyper/parser/grapher)."""
	from networkx.readwrite import json_graph
	import connector, pyper, parser, modeler, grapher
	ingraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'id': 0}, {'token': 'This', 'POS': 'DT', 'id': 1}, {'token': 'sentence', 'POS': 'NN', 'id': 2}, {'token': 'is', 'POS': 'VBZ', 'id': 3}, {'token': 'a', 'POS': 'DT', 'id': 4}, {'token': 'test', 'POS': 'NN', 'id': 5}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})
	outgraph = json_graph.node_link_graph({'directed': True, 'graph': [], 'nodes': [{'token': 'ROOT', 'span': set([0, 1, 2, 3, 4, 5]), 'id': 0, 'label': None}, {'token': 'This', 'label': None, 'span': set([1]), 'id': 1, 'POS': 'DT'}, {'token': 'sentence', 'label': None, 'span': set([1, 2]), 'id': 2, 'POS': 'NN'}, {'token': 'is', 'label': None, 'span': set([3]), 'id': 3, 'POS': 'VBZ'}, {'token': 'a', 'label': None, 'span': set([4]), 'id': 4, 'POS': 'DT'}, {'token': 'test', 'label': None, 'span': set([1, 2, 3, 4, 5]), 'id': 5, 'POS': 'NN'}], 'links': [{'source': 0, 'relation': 'root', 'target': 5}, {'source': 2, 'relation': 'det', 'target': 1}, {'source': 5, 'relation': 'nsubj', 'target': 2}, {'source': 5, 'relation': 'cop', 'target': 3}, {'source': 5, 'relation': 'det', 'target': 4}], 'multigraph': False})

	connector.initialize()
	pyper.initialize()
	parser.initialize()
	modeler.stimulate(ingraph)
	# the stray trailing ", aligner, grapher, labeler" has been removed:
	# aligner and labeler are not imported here, so evaluating that tuple
	# raised NameError at runtime
	grapher.insertactivenode(107, outgraph)
	modeler.respond()
	parser.terminate()
	pyper.terminate()
	connector.terminate()
Example 19
Project: pyblk   Author: mulkieran   File: _readwrite.py    GNU General Public License v2.0 5 votes vote down vote up
def readin(data):
        """
        Build a graph from already-parsed node-link JSON data.

        :param data: the JSON formatted data
        :returns: the graph
        :rtype: DiGraph
        """
        result = json_graph.node_link_graph(data)
        # restore the original (non-string) node attribute types in place
        Rewriter.destringize(result)
        return result
Example 20
Project: GraphZoom   Author: GraphZoom-iclr   File: utils.py    MIT License 5 votes vote down vote up
def json2mtx(dataset):
    """Convert a node-link JSON graph into a MatrixMarket Laplacian file.

    Reads ``dataset/{name}/{name}-G.json`` and writes
    ``dataset/{name}/{name}.mtx``.

    :param dataset: dataset name
    :return: the Laplacian matrix
    """
    with open("dataset/{}/{}-G.json".format(dataset, dataset)) as f:
        G_data = json.load(f)
    G = json_graph.node_link_graph(G_data)

    laplacian = laplacian_matrix(G)
    # mmwrite opens the target path itself; the original's extra
    # open("...", "wb")/close() pair was redundant and shadowed the
    # `file` builtin
    mmwrite("dataset/{}/{}.mtx".format(dataset, dataset), laplacian)

    return laplacian
Example 21
Project: iok   Author: rustielin   File: iok.py    MIT License 5 votes vote down vote up
def read_from_file(self, filename=FILENAME):
        """Populate ``self.graph`` from a node-link formatted JSON file."""
        with open(filename, 'r') as f:
            payload = json.load(f)
        self.graph = json_graph.node_link_graph(payload)
Example 22
Project: PGE   Author: yaobaiwei   File: utils.py    MIT License 5 votes vote down vote up
def loadG(x, d):
    """Load the node-link graph stored at ``x + '-G.json'``.

    :param x: dataset path prefix
    :param d: forwarded as node_link_graph's second argument (``directed``)
    :return: the reconstructed networkx graph
    """
    # context manager so the file handle is closed (the original leaked it)
    with open(x + '-G.json') as f:
        return json_graph.node_link_graph(json.load(f), d)
Example 23
Project: PGE   Author: yifan-h   File: utils.py    MIT License 5 votes vote down vote up
def loadG(x, d):
    """Load the node-link graph stored at ``x + '-G.json'``.

    :param x: dataset path prefix
    :param d: forwarded as node_link_graph's second argument (``directed``)
    :return: the reconstructed networkx graph
    """
    # context manager so the file handle is closed (the original leaked it)
    with open(x + '-G.json') as f:
        return json_graph.node_link_graph(json.load(f), d)
Example 24
Project: dgl   Author: dmlc   File: ppi.py    Apache License 2.0 4 votes vote down vote up
def _load(self):
        """Loads input data.

        train/test/valid_graph.json => the graph data used for training,
          test and validation as json format;
        train/test/valid_feats.npy => the feature vectors of nodes as
          numpy.ndarry object, it's shape is [n, v],
          n is the number of nodes, v is the feature's dimension;
        train/test/valid_labels.npy=> the labels of the input nodes, it
          is a numpy ndarry, it's like[[0, 0, 1, ... 0], 
          [0, 1, 1, 0 ...1]], shape of it is n*h, n is the number of nodes,
          h is the label's dimension;
        train/test/valid/_graph_id.npy => the element in it indicates which
          graph the nodes belong to, it is a one dimensional numpy.ndarray
          object and the length of it is equal the number of nodes,
          it's like [1, 1, 2, 1...20]. 
        """
        name = 'ppi'
        # renamed from `dir`, which shadowed the builtin
        data_dir = get_download_dir()
        zip_file_path = '{}/{}.zip'.format(data_dir, name)
        download(_get_dgl_url(_url), path=zip_file_path)
        extract_archive(zip_file_path,
                        '{}/{}'.format(data_dir, name))
        print('Loading G...')
        # the three mode branches only differed in the file-name prefix;
        # fold them into one, and close the JSON file via a context manager
        if self.mode in ('train', 'valid', 'test'):
            prefix = '{}/ppi/{}'.format(data_dir, self.mode)
            with open(prefix + '_graph.json') as jsonfile:
                g_data = json.load(jsonfile)
            self.labels = np.load(prefix + '_labels.npy')
            self.features = np.load(prefix + '_feats.npy')
            self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
            self.graph_id = np.load(prefix + '_graph_id.npy')
Example 25
Project: soil   Author: gsi-upm   File: simulation.py    Apache License 2.0 4 votes vote down vote up
def __init__(self, name=None, group=None, topology=None, network_params=None,
                 network_agents=None, agent_type=None, states=None,
                 default_state=None, interval=1, num_trials=1,
                 max_time=100, load_module=None, seed=None,
                 dir_path=None, environment_agents=None,
                 environment_params=None, environment_class=None,
                 **kwargs):
        """Configure a simulation: topology, agents, environment, and run params."""
        # Fixed: `str(seed) or str(time.time())` never fell back for
        # seed=None, because str(None) == 'None' is truthy.
        self.seed = str(seed) if seed is not None else str(time.time())
        self.load_module = load_module
        self.network_params = network_params
        self.name = name or 'Unnamed_' + time.strftime("%Y-%m-%d_%H:%M:%S")
        self.group = group or None
        self.num_trials = num_trials
        self.max_time = max_time
        self.default_state = default_state or {}
        self.dir_path = dir_path or os.getcwd()
        self.interval = interval

        sys.path += list(x for x in [os.getcwd(), self.dir_path] if x not in sys.path)

        if topology is None:
            topology = serialization.load_network(network_params,
                                                  dir_path=self.dir_path)
        # NOTE(review): `basestring` is Python-2-only; assumes a py2 runtime
        # or a compatibility shim elsewhere in the module — confirm.
        elif isinstance(topology, basestring) or isinstance(topology, dict):
            topology = json_graph.node_link_graph(topology)
        self.topology = nx.Graph(topology)


        self.environment_params = environment_params or {}
        self.environment_class = serialization.deserialize(environment_class,
                                                   known_modules=['soil.environment', ]) or Environment

        environment_agents = environment_agents or []
        self.environment_agents = agents._convert_agent_types(environment_agents,
                                                              known_modules=[self.load_module])

        distro = agents.calculate_distribution(network_agents,
                                               agent_type)
        self.network_agents = agents._convert_agent_types(distro,
                                                          known_modules=[self.load_module])

        self.states = agents._validate_states(states,
                                              self.topology)
Example 26
Project: GPS   Author: golsun   File: def_painter.py    MIT License 4 votes vote down vote up
def find_node_edges(dir_raw, node, extra):
	"""Collect the normalized flux from *node* to each species over all points.

	:param dir_raw: raw-data folder containing a ``graph`` subfolder
	:param node: source species whose outgoing fluxes are traced
	:param extra: dict with 'n_edge' (edges to keep), 'species', 'traced'
	:return: (cc_all, ff_all) — per-species flux histories (percent of the
		point's total) and the matching species names, strongest first
	"""
	n_edge = extra['n_edge']
	species = extra['species']
	traced = extra['traced']


	i_pnt = 0
	s = node
	mat_flux = dict()
	for t in species:
		mat_flux[t] = []

	dir_graph = os.path.join(dir_raw, 'graph')
	while True:
		path_graph = os.path.join(dir_graph, traced + '_' + str(i_pnt) + '.json')
		# print() call instead of the Python-2-only print statement
		print(path_graph)

		if os.path.exists(path_graph):
			data = json.load(open(path_graph, 'r'))
			flux_graph = json_graph.node_link_graph(data)

			sum_flux = 0
			for t in species:
				try:
					flux = max(0, flux_graph[s][t]['flux'])
				except KeyError:
					# no edge from s to t in this graph
					flux = 0
				mat_flux[t].append(flux)
				sum_flux += flux

			# normalize to percent of this point's total outgoing flux
			# NOTE(review): raises ZeroDivisionError if sum_flux is 0 —
			# presumably the traced node always has outgoing flux; confirm.
			for t in species:
				mat_flux[t][-1] = 100.0 * mat_flux[t][-1] / sum_flux


		else:
			break
		i_pnt += 1


	peak_flux = dict()
	for t in mat_flux.keys():
		peak_flux[t] = max(mat_flux[t])

	tt = keys_sorted(peak_flux)
	cc_all = []
	ff_all = []
	for i_t in range(n_edge):
		t = tt[i_t]
		cc = mat_flux[t]
		cc_all.append(cc)
		ff_all.append(t)


	return cc_all, ff_all
Example 27
Project: watsongraph   Author: ResidentMario   File: conceptmodel.py    MIT License 4 votes vote down vote up
def load_from_json(self, data_repr):
        """
        Generates a ConceptModel out of a JSON representation. Counter-operation to `to_dict()`.

        :param data_repr: The node-link dictionary being passed to the method.

        :return: The generated ConceptModel.
        """
        flattened_graph = json_graph.node_link_graph(data_repr)
        # wrap each raw concept label in a Node object and relabel the graph
        m = {concept: Node(concept) for concept in flattened_graph.nodes()}
        self.graph = nx.relabel_nodes(flattened_graph, m)
        # re-apply every non-id node attribute from the raw JSON
        # NOTE(review): assumes set_property accepts the raw label
        # (node['id']) rather than the Node object — confirm.
        for node in data_repr['nodes']:
            for key in [key for key in node.keys() if key != 'id']:
                self.set_property(node['id'], key, node[key])

                # def visualize(self, filename='graphistry_credentials.json'):
                #     """
                #     Generates a ConceptModel visualization. WIP. Need to get a graphistry key first...
                #     :param filename -- The filename at which Graphistry service credentials are stored. Defaults to
                #     `graphistry_credentials.json`.
                #     :return: The generated visualization.
                #     """
                #     graphistry_token = import_graphistry_credentials(filename=filename)
                #     graphistry.register(key=graphistry_token)
                #     flattened_model = nx.relabel_nodes(self.graph, {node: node.concept for node in self.nodes()})
                #     flattened_model_dataframe = nx.convert_matrix.to_pandas_dataframe(flattened_model)
                #     for key in flattened_model_dataframe.keys():
                #         flattened_model_dataframe[key] = flattened_model_dataframe[key].astype(str)
                #     g = graphistry.bind(source='source', destination='target')
                #     g.plot(flattened_model_dataframe)


# def import_graphistry_credentials(filename='graphistry_credentials.json'):
#     """
#     Internal method which finds the credentials file describing the token that's needed to access Graphistry
#     services. Graphistry is an alpha-level in-development backend that is used here for visualizing the
#     ConceptModel, so keys are given out on a per-user basis; see https://github.com/graphistry/pygraphistry for more
#     information.
#
#     See also `watsongraph.event_insight_lib.import_credentials()`, which replicates this operation for the (
# required) Concept Insights API service key.
#
#     :param filename -- The filename at which Graphistry service credentials are stored. Defaults to
#     `graphistry_credentials.json`.
#     """
#     if filename in [f for f in os.listdir('.') if os.path.isfile(f)]:
#         return json.load(open(filename))['credentials']['key']
#     else:
#         raise IOError(
#                 'The visualization methods that come with the watsongraph library require a Graphistry credentials '
#                 'token to work. Did you forget to define one? For more information refer '
#                 'to:\n\nhttps://github.com/graphistry/pygraphistry#api-key') 
Example 28
Project: relational-ERM   Author: wooden-spoon   File: graph_sage_preprocess.py    MIT License 4 votes vote down vote up
def graphsage_load_data(prefix, normalize=True):
    """Load a GraphSAGE-format dataset: graph, features, id map, class map.

    :param prefix: path prefix of the ``-G.json`` / ``-feats.npy`` /
        ``-id_map.json`` / ``-class_map.json`` files; defaults to the
        bundled reddit dataset when None
    :param normalize: kept for interface compatibility (normalization code
        below is commented out)
    :return: dict with keys 'graph', 'feats', 'id_map', 'class_map'
    """
    if prefix is None:
        prefix = "../data/reddit/reddit"

    with open(prefix + "-G.json") as f:
        G_data = json.load(f)
    G = json_graph.node_link_graph(G_data)
    # G.nodes() is a non-indexable NodeView under networkx >= 2, so probe
    # the first node through an iterator instead of G.nodes()[0]
    if isinstance(next(iter(G.nodes())), int):
        conversion = int
    else:
        conversion = lambda n: n

    if os.path.exists(prefix + "-feats.npy"):
        feats = np.load(prefix + "-feats.npy")
    else:
        print("No features present.. Only identity features will be used.")
        feats = None

    with open(prefix + "-id_map.json") as f:
        id_map = json.load(f)
    id_map = {conversion(k): int(v) for k, v in id_map.items()}
    with open(prefix + "-class_map.json") as f:
        class_map = json.load(f)
    if isinstance(list(class_map.values())[0], list):
        lab_conversion = lambda n: n
    else:
        lab_conversion = int

    class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}

    ## Remove all nodes that do not have val/test annotations
    ## (necessary because of networkx weirdness with the Reddit data)
    # broken_count = 0
    # for node in G.nodes():
    #     if not 'val' in G.node[node] or not 'test' in G.node[node]:
    #         G.remove_node(node)
    #         broken_count += 1
    # print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))

    ## Make sure the graph has edge train_removed annotations
    ## (some datasets might already have this..)
    # print("Loaded data.. now preprocessing..")
    # for edge in G.edges():
    #     if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or
    #             G.node[edge[0]]['test'] or G.node[edge[1]]['test']):
    #         G[edge[0]][edge[1]]['train_removed'] = True
    #     else:
    #         G[edge[0]][edge[1]]['train_removed'] = False

    # if normalize and not feats is None:
    #     from sklearn.preprocessing import StandardScaler
    #     train_ids = np.array([id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])
    #     train_feats = feats[train_ids]
    #     scaler = StandardScaler()
    #     scaler.fit(train_feats)
    #     feats = scaler.transform(feats)

    data = {'graph': G, 'feats': feats, 'id_map': id_map, 'class_map': class_map}
    return data
Example 29
Project: OpenANE   Author: houchengbin   File: utils.py    MIT License 4 votes vote down vote up
def load_data(prefix, normalize=True, load_walks=False):
    """Load a GraphSAGE-format dataset and clean it up for training.

    :param prefix: path prefix of the ``-G.json`` / ``-feats.npy`` /
        ``-id_map.json`` / ``-class_map.json`` (and optional ``-walks.txt``)
    :param normalize: standardize features using the training nodes
    :param load_walks: also read random-walk pairs from ``-walks.txt``
    :return: (G, feats, id_map, walks, class_map)
    """
    with open(prefix + "-G.json") as f:
        G_data = json.load(f)
    G = json_graph.node_link_graph(G_data)
    def conversion(n): return int(n)  # compatible with networkx >2.0

    if os.path.exists(prefix + "-feats.npy"):
        feats = np.load(prefix + "-feats.npy")
    else:
        print("No features present.. Only identity features will be used.")
        feats = None
    with open(prefix + "-id_map.json") as f:
        id_map = json.load(f)
    id_map = {conversion(k): int(v) for k, v in id_map.items()}
    walks = []
    with open(prefix + "-class_map.json") as f:
        class_map = json.load(f)
    if isinstance(list(class_map.values())[0], list):
        def lab_conversion(n): return n
    else:
        def lab_conversion(n): return int(n)

    class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}

    # Remove all nodes that do not have val/test annotations
    # (necessary because of networkx weirdness with the Reddit data)
    # Iterate over a snapshot: removing nodes while iterating the live
    # NodeView raises RuntimeError on networkx >= 2.
    broken_count = 0
    for node in list(G.nodes()):
        if 'val' not in G.node[node] or 'test' not in G.node[node]:
            G.remove_node(node)
            broken_count += 1
    print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))

    # Make sure the graph has edge train_removed annotations
    # some datasets might already have this
    print("Loaded data.. now preprocessing..")
    for edge in G.edges():
        if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or
                G.node[edge[0]]['test'] or G.node[edge[1]]['test']):
            G[edge[0]][edge[1]]['train_removed'] = True
        else:
            G[edge[0]][edge[1]]['train_removed'] = False

    if normalize and feats is not None:
        from sklearn.preprocessing import StandardScaler
        train_ids = np.array([id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])
        train_feats = feats[train_ids]
        scaler = StandardScaler()
        scaler.fit(train_feats)
        feats = scaler.transform(feats)

    if load_walks:
        with open(prefix + "-walks.txt") as fp:
            for line in fp:
                # NOTE(review): on Python 3 this appends a lazy map object,
                # not a list — presumably the caller only iterates it once;
                # confirm before materializing with list().
                walks.append(map(conversion, line.split()))

    return G, feats, id_map, walks, class_map