org.neo4j.graphdb.traversal.Evaluators Java Examples

The following examples show how to use org.neo4j.graphdb.traversal.Evaluators. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
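Most of the examples share one pattern: registering depth-based evaluators on a TraversalDescription, usually Evaluators.fromDepth(1) together with Evaluators.toDepth(1) so that only paths of exactly depth 1 (a node's direct neighbours) are returned. When several evaluators are registered, a path is included only if all of them accept it. Below is a minimal sketch of that pattern, assuming an embedded Neo4j 3.x GraphDatabaseService and a surrounding transaction; the class and method names are illustrative, not taken from any of the projects below.

import org.neo4j.graphdb.Direction;
import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.Node;
import org.neo4j.graphdb.RelationshipType;
import org.neo4j.graphdb.traversal.Evaluators;
import org.neo4j.graphdb.traversal.TraversalDescription;

// Illustrative sketch, not from the projects below.
public class EvaluatorsSketch {
    // Returns the nodes exactly one OUTGOING hop away from startNode:
    // fromDepth(1) excludes the start node itself and toDepth(1) prevents
    // the traversal from descending past the first level.
    public static Iterable<Node> directNeighbours(GraphDatabaseService db,
                                                  Node startNode,
                                                  RelationshipType type) {
        TraversalDescription td = db.traversalDescription()
                .depthFirst()
                .relationships(type, Direction.OUTGOING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1));
        return td.traverse(startNode).nodes();
    }
}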
Example #1
Source File: GraphApi.java    From SciGraph with Apache License 2.0
/**
 * @param parent the node whose entailment is computed
 * @param relationship the directed relationship type to traverse
 * @param traverseEquivalentEdges whether to also traverse OWL equivalent-class edges
 * @return the entailment
 */
public Collection<Node> getEntailment(Node parent, DirectedRelationshipType relationship,
    boolean traverseEquivalentEdges) {
  Set<Node> entailment = new HashSet<>();
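  // fromDepth(0) and all() both accept every position, so the traversal yields
  // parent itself plus everything reachable over the given relationship type.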
  TraversalDescription description = graphDb.traversalDescription().depthFirst()
      .relationships(relationship.getType(), relationship.getDirection())
      .evaluator(Evaluators.fromDepth(0)).evaluator(Evaluators.all());
  if (traverseEquivalentEdges) {
    description = description.relationships(OwlRelationships.OWL_EQUIVALENT_CLASS);
  }
  for (Path path : description.traverse(parent)) {
    entailment.add(path.endNode());
  }
  return entailment;
}
 
Example #2
Source File: DataRelationshipManager.java    From graphify with Apache License 2.0
public void getOrCreateNode(Long start, Long end, GraphDatabaseService db) {
    List<Long> relList = relationshipCache.getIfPresent(start);

    Node startNode = db.getNodeById(start);

    if (relList == null) {
        List<Long> nodeList = new ArrayList<>();
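        // fromDepth(1) excludes the start node and toDepth(1) stops the traversal
        // at one hop, so this collects exactly the direct neighbours.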
        for(Node endNodes : db.traversalDescription()
                .depthFirst()
                .relationships(withName(relationshipType), Direction.OUTGOING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(startNode)
                .nodes())
        {
            nodeList.add(endNodes.getId());
        }

        relList = nodeList;
        relationshipCache.put(start, relList);
    }

    if (!relList.contains(end)) {
        Transaction tx = db.beginTx();
        try {
            Node endNode = db.getNodeById(end);
            startNode.createRelationshipTo(endNode, withName(relationshipType));
            tx.success();
            // Only cache the new relationship once the transaction is marked successful.
            relList.add(end);
            relationshipCache.put(start, relList);
        } catch (final Exception e) {
            tx.failure();
        } finally {
            tx.close();
        }
    }
}
 
Example #3
Source File: RelationshipCache.java    From graphify with Apache License 2.0
private List<Long> getLongs(Long start, GraphDatabaseService db, List<Long> relList, Node startNode) {
    if (relList == null) {
        relList = new ArrayList<>();
        try (Transaction tx = db.beginTx()) {
            ResourceIterable<Node> nodes = db.traversalDescription()
                    .depthFirst()
                    .relationships(withName(getRelationshipType()), Direction.OUTGOING)
                    .evaluator(Evaluators.fromDepth(1))
                    .evaluator(Evaluators.toDepth(1))
                    .traverse(startNode)
                    .nodes();

            final List<Long> finalRelList = relList;
            nodes.forEach(a -> finalRelList.add(a.getId()));

            tx.success();
        } catch(Exception ex) {
            if(relList.size() == 0) {
                return relList;
            }
        }

        // De-duplicate the collected ids.
        relList = new ArrayList<>(new HashSet<>(relList));

        if (relList.size() > 0) {
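            // Store how many distinct neighbours were found as an aggregate
            // property on the start node.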
            String propertyKey = getRelationshipAggregateKey();
            Integer propertyValue = relList.size();
            try (Transaction tx = db.beginTx()) {
                startNode.setProperty(propertyKey, propertyValue);
                tx.success();
            }
        }

        getRelationshipCache().put(start, relList);
    }
    return relList;
}
 
Example #4
Source File: VectorUtil.java    From graphify with Apache License 2.0
public static Map<Long, Integer> getTermFrequencyMapForDocument(GraphDatabaseService db, Long classId)
{
    Map<Long, Integer> termDocumentMatrix;

    String cacheKey = "TERM_DOCUMENT_FREQUENCY_" + classId;

    if(vectorSpaceModelCache.getIfPresent(cacheKey) == null) {
        Node classNode = db.getNodeById(classId);

        termDocumentMatrix = new HashMap<>();
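        // Each depth-1 path follows one incoming HAS_CLASS edge; the edge's
        // "matches" property is recorded against the node at its far end.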

        IteratorUtil.asCollection(db.traversalDescription()
                .depthFirst()
                .relationships(withName("HAS_CLASS"), Direction.INCOMING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(classNode)).stream()
                .forEach(p ->
                {
                    int matches = (Integer) p.lastRelationship().getProperty("matches");
                    termDocumentMatrix.put(p.endNode().getId(), matches);
                });

        vectorSpaceModelCache.put(cacheKey, termDocumentMatrix);
    }
    else
    {
        termDocumentMatrix =  (Map<Long, Integer>)vectorSpaceModelCache.getIfPresent(cacheKey);
    }

    return termDocumentMatrix;
}
 
Example #5
Source File: VectorUtil.java    From graphify with Apache License 2.0
public static double getFeatureMatchDistribution(GraphDatabaseService db, Long patternId)
{
    Transaction tx = db.beginTx();
    Node startNode = db.getNodeById(patternId);

    // Feature match distribution
    List<Double> matches = IteratorUtil.asCollection(db.traversalDescription()
            .depthFirst()
            .relationships(withName("HAS_CLASS"), Direction.OUTGOING)
            .evaluator(Evaluators.fromDepth(1))
            .evaluator(Evaluators.toDepth(1))
            .traverse(startNode)
            .relationships())
            .stream()
            .map(p -> ((Integer)p.getProperty("matches")).doubleValue())
            .collect(Collectors.toList());

    tx.success();
    tx.close();

    // Defaults to 1.0 when there are fewer than two samples; note that the
    // value computed below is a standard deviation, despite the variable name.
    double variance = 1.0;

    if(matches.size() > 1) {
        Double[] matchArr = matches.toArray(new Double[matches.size()]);
        // Get the standard deviation of the normalized match counts
        // (the sum is hoisted out of the loop so it is computed only once).
        double total = StatUtils.sum(ArrayUtils.toPrimitive(matchArr));
        DescriptiveStatistics ds = new DescriptiveStatistics();
        matches.forEach(m -> ds.addValue(m / total));
        variance = ds.getStandardDeviation();
    }

    return variance;
}
 
Example #6
Source File: VectorUtil.java    From graphify with Apache License 2.0
public static int getDocumentSizeForFeature(GraphDatabaseService db, Long id)
{
    int documentSize;

    String cacheKey = "DOCUMENT_SIZE_FEATURE_" + id;

    if(vectorSpaceModelCache.getIfPresent(cacheKey) == null) {
        Node startNode = db.getNodeById(id);
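        // The document size for a feature is the number of classes reachable
        // over a single outgoing HAS_CLASS edge.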

        Iterator<Node> classes = db.traversalDescription()
                .depthFirst()
                .relationships(withName("HAS_CLASS"), Direction.OUTGOING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(startNode)
                .nodes().iterator();

        documentSize = IteratorUtil.count(classes);

        vectorSpaceModelCache.put(cacheKey, documentSize);
    }
    else
    {
        documentSize = (Integer)vectorSpaceModelCache.getIfPresent(cacheKey);
    }

    return documentSize;
}
 
Example #7
Source File: VectorUtil.java    From graphify with Apache License 2.0
private static List<LinkedHashMap<String, Object>> getFeaturesForClass(GraphDatabaseService db, Node classNode) {
    List<LinkedHashMap<String, Object>> patternIds = new ArrayList<>();

    for (Path p : db.traversalDescription()
            .depthFirst()
            .relationships(withName("HAS_CLASS"), Direction.INCOMING)
            .evaluator(Evaluators.fromDepth(1))
            .evaluator(Evaluators.toDepth(1))
            .traverse(classNode)) {
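        // Keep only features whose match distribution clears the confidence threshold.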

        if(getFeatureMatchDistribution(db, p.endNode().getId()) > CONFIDENCE_INTERVAL) {

            LinkedHashMap<String, Object> featureMap = new LinkedHashMap<>();

            if (p.relationships().iterator().hasNext()) {
                featureMap.put("frequency", p.relationships().iterator().next().getProperty("matches"));
            } else {
                featureMap.put("frequency", 0);
            }

            featureMap.put("feature", ((Long) p.endNode().getId()).intValue());

            patternIds.add(featureMap);
        }
    }
    return patternIds;
}
 
Example #8
Source File: AtlasTraversals.java    From atlas with GNU General Public License v3.0
public TraversalDescription getWayTraversalDescriptionExcludingWayStartNode(GraphDatabaseService db, Node wayStart) {
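    // excludeStartPosition() filters out the path of length 0, so the way-start
    // node itself is not returned.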
    return getWayTraversalDescriptionIncludingWayStartNode(db,wayStart).evaluator(Evaluators.excludeStartPosition());
}
 
Example #9
Source File: Writer.java    From neo4j-mazerunner with Apache License 2.0
public static Path exportPartitionToHDFSParallel(GraphDatabaseService db, Node partitionNode, PartitionDescription partitionDescription) throws IOException, URISyntaxException {
    FileSystem fs = FileUtil.getHadoopFileSystem();
    Path pt = new Path(ConfigurationLoader.getInstance().getHadoopHdfsUri() + EDGE_LIST_RELATIVE_FILE_PATH.replace("{job_id}", partitionDescription.getPartitionId().toString()));
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt)));

    Integer reportBlockSize = 20000;

    Transaction tx = db.beginTx();
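    // Note: toDepth(1) without a matching fromDepth(1) also accepts depth 0,
    // so partitionNode itself is included in the traversal results.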

    ResourceIterable<Node> nodes = db.traversalDescription()
            .depthFirst()
            .relationships(withName(partitionDescription.getGroupRelationship()), Direction.OUTGOING)
            .evaluator(Evaluators.toDepth(1))
            .traverse(partitionNode)
            .nodes();

    if (nodes.iterator().hasNext()) {

        br.write("# Adacency list" + "\n");

        List<Spliterator<Node>> spliteratorList = new ArrayList<>();
        boolean hasSpliterator = true;
        Spliterator<Node> nodeSpliterator = nodes.spliterator();

        while (hasSpliterator) {
            Spliterator<Node> localSpliterator = nodeSpliterator.trySplit();
            hasSpliterator = localSpliterator != null;
            if (hasSpliterator)
                spliteratorList.add(localSpliterator);
        }


        counter = 0;

        if (spliteratorList.size() > 4) {
            // Fork join
            ParallelWriter parallelWriter = new ParallelWriter<Node>(spliteratorList.toArray(new Spliterator[spliteratorList.size()]),
                    new GraphWriter(0, spliteratorList.size(), br, spliteratorList.size(), reportBlockSize, db, partitionDescription.getTargetRelationship()));
            ForkJoinPool pool = new ForkJoinPool();
            pool.invoke(parallelWriter);
        } else {
            // Sequential
            spliteratorList.forEach(sl -> sl.forEachRemaining(n -> {
                try {
                    writeBlockForNode(n, db, br, reportBlockSize, partitionDescription.getTargetRelationship());
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }));
        }

        System.out.println("Mazerunner Partition Export Status: " + MessageFormat.format("{0,number,#.##%}", 1.0));

        br.flush();
        br.close();

        tx.success();
        tx.close();

        return pt;
    } else {
        return null;
    }
}
 
Example #10
Source File: Writer.java    From neo4j-mazerunner with Apache License 2.0
/**
 * Applies the result of the analysis as a partitioned value connecting the partition node to the target node.
 *
 * @param line             The line from the HDFS text file containing the analysis results.
 * @param db               The Neo4j graph database context.
 * @param reportBlockSize  The report block size for progress status.
 * @param processorMessage The processor message containing the description of the analysis.
 * @param partitionNode    The partition node that will be the source node for creating partitioned relationships to the target node.
 */
public static void updatePartitionBlockForRow(String line, GraphDatabaseService db, int reportBlockSize, ProcessorMessage processorMessage, Node partitionNode) {
    if (line != null && !line.startsWith("#")) {
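        // Each non-comment line holds "<nodeId> <weight>" separated by whitespace.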
        String[] rowVal = line.split("\\s");
        Long nodeId = Long.parseLong(rowVal[0]);
        Double weight = Double.parseDouble(rowVal[1]);
        Node targetNode = db.getNodeById(nodeId);

        Iterator<Relationship> rels = db.traversalDescription()
                .depthFirst()
                .relationships(withName(processorMessage.getAnalysis()), Direction.INCOMING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(targetNode)
                .relationships()
                .iterator();

        // Get the relationship to update
        Relationship updateRel = null;

        // Scan the relationships
        while (rels.hasNext() && updateRel == null) {
            Relationship currentRel = rels.next();
            if (currentRel.getStartNode().getId() == partitionNode.getId())
                updateRel = currentRel;
        }

        // Create or update the relationship for the analysis on the partition
        if (updateRel != null) {
            updateRel.setProperty("value", weight);
        } else {
            Relationship newRel = partitionNode.createRelationshipTo(targetNode, withName(processorMessage.getAnalysis()));
            newRel.setProperty("value", weight);
        }

        Writer.updateCounter++;
        if (Writer.updateCounter % reportBlockSize == 0) {
            System.out.println("Nodes updated: " + Writer.updateCounter);
        }
    }
}
 
Example #11
Source File: Writer.java    From neo4j-mazerunner with Apache License 2.0
public static void updateCollaborativeFilteringForRow(String line, GraphDatabaseService db, int reportBlockSize) {
    if (line != null && !line.startsWith("#")) {
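        // Each non-comment line holds "<fromId>,<toId>,<rank>".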
        String[] rowVal = line.split(",");
        Long from = Long.parseLong(rowVal[0]);
        Long to = Long.parseLong(rowVal[1]);
        Integer rank = Integer.parseInt(rowVal[2]);
        Node fromNode = db.getNodeById(from);

        final String recommendation = "RECOMMENDATION";

        Iterator<Relationship> rels = db.traversalDescription()
                .depthFirst()
                .relationships(withName(recommendation), Direction.INCOMING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(fromNode)
                .relationships()
                .iterator();

        Relationship updateRel = null;

        // Scan the relationships
        while (rels.hasNext()) {
            Relationship currentRel = rels.next();
            if(currentRel.hasProperty("rank") && Objects.equals(currentRel.getProperty("rank"), rank)) {
                if(currentRel.getEndNode().getId() != to) {
                    currentRel.delete();
                } else updateRel = currentRel;

                break;
            }
        }

        // Create or update the relationship for the analysis on the partition
        if (updateRel == null) {
            Relationship newRel = fromNode.createRelationshipTo(db.getNodeById(to), withName(recommendation));
            newRel.setProperty("rank", rank);
        }

        Writer.updateCounter++;
        if (Writer.updateCounter % reportBlockSize == 0) {
            System.out.println("Nodes updated: " + Writer.updateCounter);
        }

    }
}
 
Example #12
Source File: Writer.java    From neo4j-mazerunner with Apache License 2.0
public static Path exportSubgraphToHDFS(GraphDatabaseService db) throws IOException, URISyntaxException {
    FileSystem fs = FileUtil.getHadoopFileSystem();
    Path pt = new Path(ConfigurationLoader.getInstance().getHadoopHdfsUri() + EDGE_LIST_RELATIVE_FILE_PATH.replace("/{job_id}", ""));
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt)));

    Transaction tx = db.beginTx();

    // Get all nodes in the graph
    Iterable<Node> nodes = GlobalGraphOperations.at(db)
            .getAllNodes();

    br.write("# Adacency list" + "\n");

    int nodeTotal = IteratorUtil.count(nodes);
    final int[] nodeCount = {0};
    final int[] pathCount = {0};
    int pathCountBlocks = 10000;

    nodes.iterator().forEachRemaining(n -> {
        // Filter nodes by all paths connected by the relationship type described in the configuration properties
        Iterable<org.neo4j.graphdb.Path> nPaths = db.traversalDescription()
                .depthFirst()
                .relationships(withName(ConfigurationLoader.getInstance().getMazerunnerRelationshipType()), Direction.OUTGOING)
                .evaluator(Evaluators.fromDepth(1))
                .evaluator(Evaluators.toDepth(1))
                .traverse(n);
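        // Each depth-1 path is written as one edge line: "<startId> <endId>".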

        for (org.neo4j.graphdb.Path path : nPaths) {
            try {
                String line = path.startNode().getId() + " " + path.endNode().getId();
                br.write(line + "\n");
                pathCount[0]++;
                if (pathCount[0] > pathCountBlocks) {
                    pathCount[0] = 0;
                    System.out.println("Mazerunner Export Status: " + MessageFormat.format("{0,number,#%}", ((double) nodeCount[0] / (double) nodeTotal)));
                }
            } catch (Exception ex) {
                System.out.println(ex.getMessage());
            }
        }
        nodeCount[0]++;
    });

    System.out.println("Mazerunner Export Status: " + MessageFormat.format("{0,number,#.##%}", 1.0));

    br.flush();
    br.close();
    tx.success();
    tx.close();

    return pt;
}