edu.stanford.nlp.trees.Tree Java Examples
The following examples show how to use
edu.stanford.nlp.trees.Tree.
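Before the project examples, here is a minimal, self-contained sketch of how a Tree is typically obtained from raw text with the CoreNLP pipeline. It is not taken from any of the projects below; the annotator list, the use of the default English models, and the class name ParseTreeSketch are assumptions made for illustration.

import java.util.Properties;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;

public class ParseTreeSketch {
    public static void main(String[] args) {
        // Build a pipeline that produces constituency parse trees (assumed annotator list).
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

        // Annotate a piece of text and print the Tree of each sentence.
        Annotation document = new Annotation("The quick brown fox jumps over the lazy dog.");
        pipeline.annotate(document);
        for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
            tree.pennPrint(); // pretty-print the constituency parse
        }
    }
}

Several of the examples below follow this same pattern and then walk the Tree (getLeaves(), children(), parent(...)) or hand it to a GrammaticalStructure to derive dependencies.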
Example #1
Source File: CoreNLP.java From Criteria2Query with Apache License 2.0 | 6 votes |
/**
 * With improvements for ADJP and QP.
 */
public String extractTree(Tree tree) {
    StringBuffer sb = new StringBuffer();
    List<Tree> tree_list = tree.getLeaves();
    for (Tree treeunit : tree_list) {
        if (treeunit.numChildren() == 0) {
            //System.out.println(treeunit + "\t" + treeunit.parent(tree).label().toString() + "\t" + treeunit.parent(tree).parent(tree).label().toString());
            //System.out.println(treeunit.parent(tree).parent(tree).label().toString());
            if (treeunit.parent(tree).parent(tree).parent(tree).label().toString().equals("NP")
                    && (treeunit.parent(tree).parent(tree).label().toString().equals("ADJP")
                        || treeunit.parent(tree).parent(tree).label().toString().equals("PRN"))) {
                sb.append(treeunit + "\t" + treeunit.parent(tree).label().toString() + "\t" + "NP" + "\n");
            } else {
                sb.append(treeunit + "\t" + treeunit.parent(tree).label().toString() + "\t"
                        + treeunit.parent(tree).parent(tree).label().toString() + "\n");
            }
        }
    }
    return sb.toString();
}
Example #2
Source File: SentimentAnalyzer.java From hazelcast-jet-demos with Apache License 2.0 | 6 votes |
private double getScore(List<CoreMap> sentences, double overallSentiment) {
    int matrixIndex =
            overallSentiment < -0.5 ? 0  // very negative
          : overallSentiment < 0.0  ? 1  // negative
          : overallSentiment < 0.5  ? 3  // positive
          : 4;                           // very positive
    double sum = 0;
    int numberOfSentences = 0;
    for (CoreMap sentence : sentences) {
        Tree sentiments = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
        int predictedClass = RNNCoreAnnotations.getPredictedClass(sentiments);
        if (predictedClass == 2) { // neutral
            continue;
        }
        SimpleMatrix matrix = RNNCoreAnnotations.getPredictions(sentiments);
        sum += matrix.get(matrixIndex);
        numberOfSentences++;
    }
    return sum / numberOfSentences;
}
Example #3
Source File: NERTool.java From Criteria2Query with Apache License 2.0 | 6 votes |
/**
 * Word Dependency
 * Author: chi  Date: 2017-3-22
 */
public Collection<TypedDependency> outputDependency(Tree t) {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    // tlp.setGenerateOriginalDependencies(true); // Stanford Dependencies
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
    Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    int countforitem = 0;
    int source = 0;
    int target = 0;
    for (TypedDependency item : tdl) {
        System.out.println(item);
    }
    return tdl;
}
Example #4
Source File: CoreNLP.java From gAnswer with BSD 3-Clause "New" or "Revised" License | 6 votes |
public Tree getParseTree(String text) {
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);
    // run all Annotators on this text
    pipeline_lemma.annotate(document);
    // these are all the sentences in this document
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        // this is the parse tree of the current sentence
        return sentence.get(TreeAnnotation.class);
    }
    return null;
}
Example #5
Source File: StanfordParser.java From gAnswer with BSD 3-Clause "New" or "Revised" License | 6 votes |
public GrammaticalStructure getGrammaticalStructure(String sentence) {
    List<CoreLabel> rawWords2 = tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
    // Converts a Sentence/List/String into a Tree.
    // In all circumstances, the input will be treated as a single sentence to be parsed.
    Tree parse = lp.apply(rawWords2);
    return gsf.newGrammaticalStructure(parse);
    /*
    List<TypedDependency> tdl = gs.typedDependencies(false);
    for (TypedDependency td : tdl) {
        System.out.println(td.reln().getShortName() + "(" + td.gov() + "," + td.dep() + ")");
        System.out.println("gov=" + td.gov()
                + "\tgov.index=" + td.gov().index()
                + "\tgov.value=" + td.gov().value()
                + "\tgov.pos=" + ((TreeGraphNode) td.gov().parent()).value());
    }
    */
    //System.out.println(tdl);
}
Example #6
Source File: SourceTextAnalyzer.java From phrasal with GNU General Public License v3.0 | 6 votes |
/**
 * Syntactic complexity as defined by Lin (1996).
 *
 * @param tree
 * @return
 */
private static int complexityOf(Tree tree) {
    tree.indexLeaves();
    tree.percolateHeads(new CollinsHeadFinder());
    tree.percolateHeadIndices();

    Set<Dependency<Label, Label, Object>> deps = tree.dependencies();
    int complexity = 0;
    for (Dependency<Label, Label, Object> dep : deps) {
        if (!(dep instanceof UnnamedConcreteDependency)) {
            throw new RuntimeException("Cannot measure syntactic complexity.");
        }
        UnnamedConcreteDependency uDep = (UnnamedConcreteDependency) dep;
        int headIndex = uDep.getGovernorIndex();
        int depIndex = uDep.getDependentIndex();
        complexity += Math.abs(headIndex - depIndex);
    }
    return complexity;
}
Example #7
Source File: Trees.java From uncc2014watsonsim with GNU General Public License v2.0 | 6 votes |
public static List<CoreMap> parse(String text) {
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);
    // run all Annotators on this text
    pipeline.annotate(document);
    // these are all the sentences in this document
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    List<Tree> trees = new ArrayList<>();
    List<Tree> dependencies = new ArrayList<>();
    for (CoreMap sentence : sentences) {
        // this is the parse tree of the current sentence
        Tree t = sentence.get(TreeAnnotation.class);
        SemanticGraph graph = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
        trees.add(t);
    }
    return sentences;
}
Example #8
Source File: SentimentAnalyzer.java From blog-codes with Apache License 2.0 | 6 votes |
public SentimentResult getSentimentResult(String text) {
    SentimentClassification classification = new SentimentClassification();
    SentimentResult sentimentResult = new SentimentResult();
    if (text != null && text.length() > 0) {
        Annotation annotation = pipeline.process(text);
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            SimpleMatrix simpleMatrix = RNNCoreAnnotations.getPredictions(tree);

            classification.setVeryNegative((double) Math.round(simpleMatrix.get(0) * 100d));
            classification.setNegative((double) Math.round(simpleMatrix.get(1) * 100d));
            classification.setNeutral((double) Math.round(simpleMatrix.get(2) * 100d));
            classification.setPositive((double) Math.round(simpleMatrix.get(3) * 100d));
            classification.setVeryPositive((double) Math.round(simpleMatrix.get(4) * 100d));

            String setimentType = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
            sentimentResult.setSentimentType(setimentType);
            sentimentResult.setSentimentClass(classification);
            sentimentResult.setSentimentScore(RNNCoreAnnotations.getPredictedClass(tree));
        }
    }
    return sentimentResult;
}
Example #9
Source File: TreePrinter.java From UDepLambda with Apache License 2.0 | 6 votes |
static StringBuilder toStringBuilder(Tree tree, StringBuilder sb,
        boolean printOnlyLabelValue, String offset) {
    if (tree.isLeaf()) {
        if (tree.label() != null)
            sb.append(printOnlyLabelValue ? tree.label().value() : tree.label());
        return sb;
    }
    sb.append('(');
    if (tree.label() != null) {
        if (printOnlyLabelValue) {
            if (tree.value() != null)
                sb.append(tree.label().value());
            // don't print a null, just nothing!
        } else {
            sb.append(tree.label());
        }
    }
    Tree[] kids = tree.children();
    if (kids != null) {
        for (Tree kid : kids) {
            if (kid.isLeaf())
                sb.append(' ');
            else
                sb.append('\n').append(offset).append(' ');
            toStringBuilder(kid, sb, printOnlyLabelValue, offset + " ");
        }
    }
    return sb.append(')');
}
Example #10
Source File: Postprocess.java From phrases with Apache License 2.0 | 6 votes |
public List<Pattern> run(List<Pattern> patterns) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    for (Pattern pattern : patterns) {
        Annotation annotation = pipeline.process(pattern.toSentences());
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                String lemma = token.get(CoreAnnotations.LemmaAnnotation.class);
            }
        }
    }
    return null;
}
Example #11
Source File: ClueType.java From uncc2014watsonsim with GNU General Public License v2.0 | 6 votes |
/**
 * A very simple LAT detector.
 * It wants the lowest subtree with both a determiner and a noun.
 */
private static Analysis detectPart(Tree t) {
    switch (t.value()) {
        case "WDT":
        case "DT":
            return new Analysis(t, null);
        case "NN":
        case "NNS":
            return new Analysis(null, t);
        default:
            Analysis l = new Analysis((Tree) null, null);
            // The last noun tends to be the most general
            List<Tree> kids = t.getChildrenAsList();
            Collections.reverse(kids);
            for (Tree kid : kids)
                l = merge(l, detectPart(kid));
            return l;
    }
}
Example #12
Source File: ProcessConjunctions.java From ambiverse-nlu with Apache License 2.0 | 6 votes |
/** Checks whether a node that depends on one conjunct also depends on the other. */
// "He buys and sells electronic products" -- is "products" depending on both "sells" and "buys"?
private static boolean isDescendant(Tree parse, int indexCheck, int indexPivot, int indexElement) {
    // subtract 1 because the tree's leaf indexing starts at 0
    Tree pivot = parse.getLeaves().get(indexPivot - 1);
    Tree check = parse.getLeaves().get(indexCheck - 1);
    Tree element = parse.getLeaves().get(indexElement - 1);

    // find a common parent between the head conjunct and the constituent of the element
    while (!element.value().equals("ROOT")) {
        if (element.pathNodeToNode(element, pivot) != null) // is this efficient enough?
            break;
        element = element.parent(parse);
    }
    // find a path between the common parent and the other conjunct
    List<Tree> path = element.pathNodeToNode(element, check);
    if (path != null)
        return true;
    else
        return false;
}
Example #13
Source File: CoreNLPSentenceSimilarityTest.java From uncc2014watsonsim with GNU General Public License v2.0 | 6 votes |
@Test
public void testParseToTree() {
    // Empty case
    assertEquals(new ArrayList<>(), Trees.parse(""));

    // Simple case
    assertEquals(Tree.valueOf("(ROOT (NP (NN Example)))"),
            Trees.parse("Example").get(0));

    // Challenging case
    // fails: "Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo."
    // succeeds, or at least it looks generally right to me:
    assertEquals(Tree.valueOf("(ROOT (S (NP (NNP Niel) (NNP Armstrong)) "
            + "(VP (VBD was) (NP (DT the) (JJ first) (NN man)"
            + "(S (VP (TO to) (VP (VB walk) "
            + "(PP (IN on) (NP (DT the) (NN moon)))))))) (. .)))"),
            Trees.parse("Niel Armstrong was the first man to walk on the moon.").get(0));

    assertEquals(
            Tree.valueOf("(ROOT (S (NP (PRP I)) (VP (VBP am) (ADJP (JJ tall))) (. .)))"),
            Trees.parse("I am tall. You are short.").get(0));
    assertEquals(
            Tree.valueOf("(ROOT (S (NP (PRP You)) (VP (VBP are) (ADJP (JJ short))) (. .)))"),
            Trees.parse("I am tall. You are short.").get(1));
}
Example #14
Source File: MainTest.java From dependensee with GNU General Public License v2.0 | 6 votes |
/**
 * Test of writeImage method, of class Main.
 */
@Test
public void testWriteImage() throws Exception {
    String text = "A quick brown fox jumped over the lazy dog.";

    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    LexicalizedParser lp = LexicalizedParser.loadModel();
    lp.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(text)).tokenize();
    Tree tree = lp.apply(wordList);
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependenciesCollapsed();
    Main.writeImage(tdl, "image.png", 3);
    assert (new File("image.png").exists());
}
Example #15
Source File: DpUtils.java From ambiverse-nlu with Apache License 2.0 | 6 votes |
/** Correspondence between nodes in Tree and SemanticGraph */
public static Tree getNode(IndexedWord word, Tree depTree, SemanticGraph semanticGraph) {
    int indexSC = semanticGraph.vertexListSorted().indexOf(word);
    int indexDT = Integer.MAX_VALUE;
    Tree result = null;
    List<Tree> descTree = depTree.getLeaves();
    for (int i = descTree.size() - 1; i >= 0; i--) {
        if (descTree.get(i).toString().equals(word.word())) {
            if (i - indexSC < 0)
                break;
            else if ((i - indexSC) < indexDT) {
                result = descTree.get(i);
                indexDT = i - indexSC;
            }
        }
    }
    return result;
}
Example #16
Source File: DpUtils.java From ambiverse-nlu with Apache License 2.0 | 6 votes |
/** Correspondence between nodes in Tree and SemanticGraph */
public static IndexedWord getIndexedWord(Tree word, Tree depTree, SemanticGraph semanticGraph) {
    List<IndexedWord> iwList = semanticGraph.vertexListSorted();
    List<Tree> descTree = depTree.getLeaves();
    int indexDT = descTree.indexOf(word);
    int indexSC = Integer.MAX_VALUE;
    IndexedWord result = null;
    for (int i = iwList.size() - 1; i >= 0; i--) {
        if (iwList.get(i).word().equals(word.toString())) {
            if ((i - indexSC) < indexDT) {
                result = iwList.get(i);
                indexDT = i - indexSC;
            }
        }
    }
    return result;
}
Example #17
Source File: ClauseTypeLabeller.java From phrasal with GNU General Public License v3.0 | 6 votes |
private int getClauseStart(List<Label> posTags, List<Label> gloss) {
    Label npLabel = null;
    int start = -1;

    /* Find the first NP child. */
    for (Tree child : tree.getChildrenAsList()) {
        if (child.value().equals("NP")) {
            npLabel = child.getLeaves().get(0).label();
            break;
        }
    }

    /* Get its index in the clause. */
    if (npLabel != null) {
        int i = 0;
        for (Label word : gloss) {
            if (word.equals(npLabel)) {
                start = i;
                break;
            }
            i++;
        }
    }
    return start;
}
Example #18
Source File: ProcessConjunctions.java From ambiverse-nlu with Apache License 2.0 | 6 votes |
/** Checks whether a node that depends on one conjunct also depends on the other. */
// "He buys and sells electronic products" -- is "products" depending on both "sells" and "buys"?
private static boolean isDescendant(Tree parse, SemanticGraph semanticGraph,
        IndexedWord checkIW, IndexedWord pivotIW, IndexedWord elementIW) {
    Tree pivot = DpUtils.getNode(pivotIW, parse, semanticGraph);
    Tree check = DpUtils.getNode(checkIW, parse, semanticGraph);
    Tree element = DpUtils.getNode(elementIW, parse, semanticGraph);

    // find a common parent between the head conjunct and the constituent of the element
    while (!element.value().equals("ROOT")) {
        if (element.pathNodeToNode(element, pivot) != null) // is this efficient enough?
            break;
        element = element.parent(parse);
    }
    return element.dominates(check);
}
Example #19
Source File: HeadVerbFinder.java From Graphene with GNU General Public License v3.0 | 5 votes |
public Optional<String> findHeadVerb(Tree parseTree) {
    TregexPattern pattern = TregexPattern.compile(
            "ROOT <<: (__ < (VP=vp [ <+(VP) (VP=lowestvp !< VP) | ==(VP=lowestvp !< VP) ]))");
    TregexMatcher matcher = pattern.matcher(parseTree);
    while (matcher.findAt(parseTree)) {
        Tree lowestvp = matcher.getNode("lowestvp");
        return Optional.of(ParseTreeExtractionUtils.getContainingWords(lowestvp).get(0).word());
    }
    return Optional.empty();
}
Example #20
Source File: CoreNLPParser.java From Heracles with GNU General Public License v3.0 | 5 votes |
@Override
public void validatedProcess(Dataset dataset, String spanTypeOfSentenceUnit) {
    Properties prop1 = new Properties();
    prop1.setProperty("annotators", "parse");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(prop1, false);

    for (Span span : dataset.getSpans(spanTypeOfSentenceUnit)) {
        HashMap<Integer, Word> wordIndex = new HashMap<>();
        Annotation a = CoreNLPHelper.reconstructStanfordAnnotations(span, wordIndex);
        // Annotation a = new Annotation((String) span.getAnnotations().get("text"));
        if (a == null) {
            System.out.println(a);
        }
        pipeline.annotate(a);
        for (CoreMap sentence : a.get(SentencesAnnotation.class)) {
            // per sentence, check the syntax tree
            Tree tree = sentence.get(TreeAnnotation.class);
            // tree.percolateHeadAnnotations(headFinder);
            // tree.indentedListPrint();
            try {
                analyzeTree(tree, span, wordIndex);
            } catch (IllegalSpanException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }
}
Example #21
Source File: SentimentAnalyzer.java From computoser with GNU Affero General Public License v3.0 | 5 votes |
/**
 * Synchronized method to obtain the sentiment of the set of documents.
 * Synchronization is fine, because the method is invoked via a scheduled job
 * and only one execution at a time is permitted.
 * That also allows the loading of the model to be optimized.
 * @param documents
 * @return
 */
public synchronized SentimentResult getSentiment(Set<String> documents, TimelineMusic meta) {
    double sentimentSum = 0;
    for (String document : documents) {
        int mainSentiment = 0;
        if (document != null && document.length() > 0) {
            int longest = 0;
            try {
                Annotation annotation = pipeline.process(document);
                // mainSentiment is the sentiment of the whole document. We find
                // the whole document by comparing the length of individual
                // annotated "fragments"
                for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
                    Tree tree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
                    int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
                    String partText = sentence.toString();
                    if (partText.length() > longest) {
                        mainSentiment = sentiment;
                        longest = partText.length();
                    }
                }
            } catch (Exception ex) {
                logger.error("Problem analyzing document sentiment. " + document, ex);
                continue;
            }
        }
        sentimentSum += mainSentiment;
    }

    double average = sentimentSum / documents.size();
    meta.setAverageSentiment(average);

    if (average >= 2.25) {
        return SentimentResult.POSITIVE;
    } else if (average <= 1.75) {
        return SentimentResult.NEGATIVE;
    }
    return SentimentResult.NEUTRAL;
}
Example #22
Source File: CoreNLP.java From Criteria2Query with Apache License 2.0 | 5 votes |
/**
 * Word Dependency
 * Author: chi  Date: 2017-3-22
 */
public Collection<TypedDependency> outputDependency(Tree t) {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    // tlp.setGenerateOriginalDependencies(true); // Stanford Dependencies
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
    Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    int countforitem = 0;
    int source = 0;
    int target = 0;
    return tdl;
}
Example #23
Source File: TreeTransformer.java From UDepLambda with Apache License 2.0 | 5 votes |
private static String replaceNamedVars(TregexMatcher matcher, String original) {
    Pattern namedNodePattern = Pattern.compile("\\{(.+?)\\}");
    Matcher namedNodematcher = namedNodePattern.matcher(original);
    while (namedNodematcher.find()) {
        String namedNodeString = namedNodematcher.group(1);
        Tree namedNode = matcher.getNode(namedNodeString);
        original = original.replace(String.format("{%s}", namedNodeString),
                replaceSpecialChars(namedNode.label().value()));
        namedNodematcher = namedNodePattern.matcher(original);
    }
    return original;
}
Example #24
Source File: DependencyBnBPreorderer.java From phrasal with GNU General Public License v3.0 | 5 votes |
private static int computeCrossingScore(Tree a, Tree b, SymmetricalWordAlignment alignment) {
    List<Tree> aChildren = a.getChildrenAsList();
    List<Tree> bChildren = b.getChildrenAsList();

    int score = 0;

    Set<Integer> aAlignment = new HashSet<Integer>();
    int aIdx = ((IndexedWord) a.label()).index() - 1;
    aAlignment.addAll(alignment.f2e(aIdx));
    for (Tree aChild : aChildren) {
        aIdx = ((IndexedWord) aChild.label()).index() - 1;
        aAlignment.addAll(alignment.f2e(aIdx));
    }

    Set<Integer> bAlignment = new HashSet<Integer>();
    int bIdx = ((IndexedWord) b.label()).index() - 1;
    bAlignment.addAll(alignment.f2e(bIdx));
    for (Tree bChild : bChildren) {
        bIdx = ((IndexedWord) bChild.label()).index() - 1;
        bAlignment.addAll(alignment.f2e(bIdx));
    }

    for (int i : aAlignment) {
        for (int j : bAlignment) {
            if (i > j)
                score++;
        }
    }
    return score;
}
Example #25
Source File: DistortionProbability.java From phrasal with GNU General Public License v3.0 | 5 votes |
@Override
public void initialize(int sourceInputId, Sequence<IString> source) {
    Tree parseTree = CoreNLPCache.get(sourceInputId).get(TreeAnnotation.class);
    this.posTags = parseTree.preTerminalYield();
}
Example #26
Source File: DependencyBnBPreorderer.java From phrasal with GNU General Public License v3.0 | 5 votes |
FeatureNode(Tree node, IndexedWord hw) {
    List<Label> yield = node.yield();
    this.word = (IndexedWord) node.label();
    this.hw = hw;
    this.lm = (IndexedWord) yield.get(0);
    this.rm = (IndexedWord) yield.get(yield.size() - 1);
    this.dst = hw.index() - this.word.index();
}
Example #27
Source File: DependencyBnBPreorderer.java From phrasal with GNU General Public License v3.0 | 5 votes |
private static Tree generateShallowTree(HashMap<Integer, Pair<IndexedWord, List<Integer>>> dependencies) {
    if (dependencies.get(0) == null || dependencies.get(0).second.isEmpty()) {
        return new LabeledScoredTreeNode();
    }
    return generateSubTree(dependencies, dependencies.get(0).second.get(0));
}
Example #28
Source File: DependencyBnBPreorderer.java From phrasal with GNU General Public License v3.0 | 5 votes |
private static String preorder(Tree tree) {
    List<Tree> queue = new LinkedList<>();
    queue.add(tree);

    while (!queue.isEmpty()) {
        Tree currentNode = queue.remove(0);
        if (currentNode.isLeaf())
            continue;

        Tree[] children = currentNode.children();
        int childCount = children.length;
        IndexedWord hw = (IndexedWord) currentNode.label();
        List<FeatureNode> featureNodes = new ArrayList<>(childCount);
        for (int i = 0; i < childCount; i++) {
            featureNodes.add(new FeatureNode(children[i], hw));
            queue.add(children[i]);
        }
        if (childCount < 8) {
            Pair<Double, List<Integer>> result =
                    search(featureNodes, new LinkedList<Integer>(), Double.NEGATIVE_INFINITY);
            if (result != null) {
                List<Integer> permutation = result.second;
                List<Tree> newChildren = new ArrayList<>(Arrays.asList(children));
                for (int i = 0; i < childCount; i++) {
                    int idx = permutation.get(i);
                    newChildren.set(idx, children[i]);
                }
                currentNode.setChildren(newChildren);
            } else {
                System.err.println("Warning: No path found.");
            }
        }
    }
    return StringUtils.join(tree.yieldWords());
}
Example #29
Source File: RawFrenchToJSON.java From phrasal with GNU General Public License v3.0 | 5 votes |
/**
 * Extract chunks.
 *
 * @param tree
 * @return
 */
static int[] getChunkVector(Tree tree) {
    String[] iobVector = new String[tree.yield().size()];
    Arrays.fill(iobVector, "O");

    // NOTE: The order in which these patterns are applied is important.

    // Base XPs
    TregexPattern baseXPPattern = TregexPattern.compile("__ < (__ < (__ !< __)) !< (__ < (__ < __))");
    // Non-recursive NPs
    TregexPattern NPPattern = TregexPattern.compile("@NP < (__ $ __) !<< (@NP < (__ $ __)) !<< @PP");
    // Non-recursive PPs
    TregexPattern PPattern = TregexPattern.compile("@PP !<< @PP");

    TregexMatcher tregexMatcher = baseXPPattern.matcher(tree);
    CoreNLPToJSON.fillVectorWithYield(iobVector, tregexMatcher);
    tregexMatcher = NPPattern.matcher(tree);
    CoreNLPToJSON.fillVectorWithYield(iobVector, tregexMatcher);
    tregexMatcher = PPattern.matcher(tree);
    CoreNLPToJSON.fillVectorWithYield(iobVector, tregexMatcher);

    int[] indexVector = CoreNLPToJSON.iobToIndices(iobVector);
    return indexVector;
}
Example #30
Source File: TreeTransformer.java From UDepLambda with Apache License 2.0 | 5 votes |
private static boolean applyRuleGroupsOnSubTree(TransformationRuleGroups ruleGroups,
        DependencyTree tree, DependencyTree subTree) {
    Preconditions.checkNotNull(tree);

    // Apply ruleGroups at the current node.
    boolean ruleGroupFound = applyRuleGroupsOnNode(ruleGroups, tree, subTree);

    // Apply ruleGroups at children trees.
    for (Tree child : subTree.children()) {
        ruleGroupFound |= applyRuleGroupsOnSubTree(ruleGroups, tree, (DependencyTree) child);
    }
    return ruleGroupFound;
}