org.antlr.v4.runtime.Token Java Examples
The following examples show how to use org.antlr.v4.runtime.Token.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet, so you can go to the original source for full context.
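As a quick orientation before the examples: Token exposes the lexical information most of the snippets below rely on, such as getText(), getType(), getLine(), getCharPositionInLine(), getTokenIndex(), and getChannel(). A minimal, self-contained sketch using CommonToken, the runtime's default Token implementation (the token type used here is arbitrary and chosen only for illustration; real tokens normally come from a generated lexer):

import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.Token;

public class TokenDemo {
    public static void main(String[] args) {
        // Build a standalone token; in practice a Lexer produces these.
        Token token = new CommonToken(Token.MIN_USER_TOKEN_TYPE, "hello");

        System.out.println(token.getText());    // "hello"
        System.out.println(token.getType());    // 1 == Token.MIN_USER_TOKEN_TYPE
        System.out.println(token.getChannel()); // 0 == Token.DEFAULT_CHANNEL
    }
}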
Example #1
Source File: BatfishANTLRErrorStrategy.java From batfish with Apache License 2.0 | 6 votes |
/**
 * Consume all tokens a whole line at a time until the next token is one expected by the current
 * rule. Each line (as delimited by supplied separator token) starting from the current line up to
 * the last line consumed is placed in an {@link ErrorNode} and inserted as a child of the current
 * rule.
 *
 * @param recognizer The {@link Parser} to whom to delegate creation of each {@link ErrorNode}
 */
private void consumeBlocksUntilWanted(Parser recognizer) {
  IntervalSet expecting = recognizer.getExpectedTokens();
  IntervalSet whatFollowsLoopIterationOrRule = expecting.or(getErrorRecoverySet(recognizer));

  int nextToken;
  do {
    // Eat tokens until we are at the end of the line
    consumeUntilEndOfLine(recognizer);

    // Get the line number and separator text from the separator token
    Token separatorToken = recognizer.getCurrentToken();

    // Insert the current line as an {@link ErrorNode} as a child of the current rule
    createErrorNode(recognizer, recognizer.getContext(), separatorToken);

    // Eat the separator token
    recognizer.consume();

    nextToken = recognizer.getInputStream().LA(1);
  } while (!whatFollowsLoopIterationOrRule.contains(nextToken) && nextToken != Lexer.EOF);
}
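The helper consumeUntilEndOfLine is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming the lexer emits a dedicated line-separator token type (NEWLINE here is a hypothetical constant; this is not the actual batfish implementation):

// Hypothetical sketch: advance the parser until the lookahead is the
// line-separator token or EOF, leaving the separator as the current token.
private void consumeUntilEndOfLine(Parser recognizer) {
  int la = recognizer.getInputStream().LA(1);
  while (la != NEWLINE && la != Lexer.EOF) {
    recognizer.consume();
    la = recognizer.getInputStream().LA(1);
  }
}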
Example #2
Source File: StsExprCreatorVisitor.java From theta with Apache License 2.0 | 6 votes |
private Expr<?> createMutliplicativeExpr(final Expr<?> opsHead, final List<? extends Expr<?>> opsTail,
        final List<? extends Token> opers) {
    checkArgument(opsTail.size() == opers.size());

    if (opsTail.isEmpty()) {
        return opsHead;
    } else {
        final Expr<?> newOpsHead = opsTail.get(0);
        final List<? extends Expr<?>> newOpsTail = opsTail.subList(1, opsTail.size());

        final Token operHead = opers.get(0);
        final List<? extends Token> opersTail = opers.subList(1, opers.size());

        final Expr<?> subExpr = createMultiplicativeSubExpr(opsHead, newOpsHead, operHead);

        return createMutliplicativeExpr(subExpr, newOpsTail, opersTail);
    }
}
Example #3
Source File: AbstractProtoParserListener.java From protostuff-compiler with Apache License 2.0 | 6 votes |
private Optional<Token> findTrailingComment(@Nullable List<Token> tokensAfter) {
    if (tokensAfter == null) {
        return Optional.empty();
    }
    Optional<Token> trailingComment = Optional.empty();
    for (Token token : tokensAfter) {
        if (token.getType() == LINE_COMMENT) {
            trailingComment = Optional.of(token);
        }
        // Try to find single trailing comment (on the same line).
        // If we hit newline, then we can stop right away.
        if (trailingComment.isPresent() || token.getType() == NL) {
            break;
        }
    }
    return trailingComment;
}
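The tokensAfter list is typically gathered from the token stream's hidden channel. A hypothetical helper showing how a caller in the same class might collect it (the method name and the assumption that comments sit on a hidden channel are illustrative; the actual protostuff-compiler call site may differ):

// Illustrative only: fetch the hidden-channel tokens (comments, whitespace)
// that follow a context's last token and look for a trailing line comment.
private Optional<Token> trailingCommentFor(BufferedTokenStream tokens, ParserRuleContext ctx) {
    List<Token> tokensAfter = tokens.getHiddenTokensToRight(ctx.getStop().getTokenIndex());
    return findTrailingComment(tokensAfter); // handles null when nothing follows
}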
Example #4
Source File: CfaExpression.java From theta with Apache License 2.0 | 6 votes |
private Expr<?> createAdditiveExpr(final Expr<?> opsHead, final List<? extends Expr<?>> opsTail,
        final List<? extends Token> opers) {
    checkArgument(opsTail.size() == opers.size());

    if (opsTail.isEmpty()) {
        return opsHead;
    } else {
        final Expr<?> newOpsHead = opsTail.get(0);
        final List<? extends Expr<?>> newOpsTail = opsTail.subList(1, opsTail.size());

        final Token operHead = opers.get(0);
        final List<? extends Token> opersTail = opers.subList(1, opers.size());

        final Expr<?> subExpr = createAdditiveSubExpr(opsHead, newOpsHead, operHead);

        return createAdditiveExpr(subExpr, newOpsTail, opersTail);
    }
}
Example #5
Source File: BeetlAntlrErrorStrategy.java From beetl2.0 with BSD 3-Clause "New" or "Revised" License | 6 votes |
protected void reportUnwantedToken(@NotNull Parser recognizer) {
    if (inErrorRecoveryMode(recognizer)) {
        return;
    }

    beginErrorCondition(recognizer);

    Token t = recognizer.getCurrentToken();
    String tokenName = getTokenErrorDisplay(t);
    IntervalSet expecting = getExpectedTokens(recognizer);
    // "多余输入 ... 期望 ..." reads "extraneous input ..., expecting ..."
    String msg = "多余输入 " + tokenName + " 期望 " + expecting.toString(recognizer.getTokenNames());
    BeetlException exception = new BeetlParserException(BeetlException.PARSER_MISS_ERROR, msg);
    // exception.token = this.getGrammarToken(t);
    exception.pushToken(this.getGrammarToken(t));

    throw exception;
}
Example #6
Source File: RefactorUtils.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 6 votes |
public static List<TerminalNode> getAllRuleRefNodes(Parser parser, ParseTree tree, String ruleName) {
    List<TerminalNode> nodes = new ArrayList<TerminalNode>();
    Collection<ParseTree> ruleRefs;
    if ( Grammar.isTokenName(ruleName) ) {
        ruleRefs = XPath.findAll(tree, "//lexerRuleBlock//TOKEN_REF", parser);
    }
    else {
        ruleRefs = XPath.findAll(tree, "//ruleBlock//RULE_REF", parser);
    }
    for (ParseTree node : ruleRefs) {
        TerminalNode terminal = (TerminalNode)node;
        Token rrefToken = terminal.getSymbol();
        String r = rrefToken.getText();
        if ( r.equals(ruleName) ) {
            nodes.add(terminal);
        }
    }
    if ( nodes.size()==0 ) return null;
    return nodes;
}
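A possible call site for this utility; note that it returns null rather than an empty list when nothing matches. The surrounding method and the parsed grammar are hypothetical, not part of the plugin:

// Hypothetical usage: print every reference to a given rule in a parsed ANTLRv4 grammar.
// `parser` and `tree` are assumed to come from parsing a grammar file.
static void printRuleRefs(Parser parser, ParseTree tree, String ruleName) {
    List<TerminalNode> refs = RefactorUtils.getAllRuleRefNodes(parser, tree, ruleName);
    if (refs == null) {
        return; // null, not an empty list, when there are no matches
    }
    for (TerminalNode ref : refs) {
        Token t = ref.getSymbol();
        System.out.println(t.getText() + " referenced at line " + t.getLine());
    }
}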
Example #7
Source File: BoaErrorListener.java From compiler with Apache License 2.0 | 6 votes |
public void error(final String kind, final TokenSource tokens, final Object offendingSymbol,
        final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    hasError = true;

    final String filename = tokens.getSourceName();
    System.err.print(filename.substring(filename.lastIndexOf(File.separator) + 1) + ": compilation failed: ");
    System.err.print("Encountered " + kind + " error ");
    if (offendingSymbol != null)
        System.err.print("\"" + offendingSymbol + "\" ");
    System.err.print("at line " + line + ", ");
    if (length > 0)
        System.err.print("columns " + charPositionInLine + "-" + (charPositionInLine + length - 1));
    else
        System.err.print("column " + charPositionInLine);
    System.err.println(". " + msg);

    underlineError(tokens, (Token)offendingSymbol, line, charPositionInLine, length);

    if (e != null)
        for (final StackTraceElement st : e.getStackTrace())
            System.err.println("\tat " + st);
    else
        System.err.println("\tat unknown stack");
}
Example #8
Source File: RefactorUtils.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 6 votes |
/** Get start/stop of an entire rule including semi and then clean up
 *  WS at end.
 */
public static String getRuleText(CommonTokenStream tokens, ParserRuleContext ruleDefNode) {
    Token stop = ruleDefNode.getStop();
    Token semi = stop;
    TerminalNode colonNode = ruleDefNode.getToken(ANTLRv4Parser.COLON, 0);
    Token colon = colonNode.getSymbol();
    Token beforeSemi = tokens.get(stop.getTokenIndex()-1);
    Token afterColon = tokens.get(colon.getTokenIndex()+1);

    // trim whitespace/comments before / after rule text
    List<Token> ignoreBefore = tokens.getHiddenTokensToRight(colon.getTokenIndex());
    List<Token> ignoreAfter = tokens.getHiddenTokensToLeft(semi.getTokenIndex());
    Token textStart = afterColon;
    Token textStop = beforeSemi;
    if ( ignoreBefore!=null ) {
        Token lastWSAfterColon = ignoreBefore.get(ignoreBefore.size()-1);
        textStart = tokens.get(lastWSAfterColon.getTokenIndex()+1);
    }
    if ( ignoreAfter!=null ) {
        int firstWSAtEndOfRule = ignoreAfter.get(0).getTokenIndex()-1;
        textStop = tokens.get(firstWSAtEndOfRule); // stop before 1st ignore token at end
    }

    return tokens.getText(textStart, textStop);
}
Example #9
Source File: AstBuilder.java From rainbow with Apache License 2.0 | 6 votes |
private static IntervalLiteral.IntervalField getIntervalFieldType(Token token) {
    switch (token.getType()) {
        case SqlBaseLexer.YEAR:
            return IntervalLiteral.IntervalField.YEAR;
        case SqlBaseLexer.MONTH:
            return IntervalLiteral.IntervalField.MONTH;
        case SqlBaseLexer.DAY:
            return IntervalLiteral.IntervalField.DAY;
        case SqlBaseLexer.HOUR:
            return IntervalLiteral.IntervalField.HOUR;
        case SqlBaseLexer.MINUTE:
            return IntervalLiteral.IntervalField.MINUTE;
        case SqlBaseLexer.SECOND:
            return IntervalLiteral.IntervalField.SECOND;
    }

    throw new IllegalArgumentException("Unsupported interval field: " + token.getText());
}
Example #10
Source File: NodeFactory.java From grcuda with BSD 3-Clause "New" or "Revised" License | 6 votes |
public ArithmeticNode createBinary(Token opToken, ExpressionNode leftNode, ExpressionNode rightNode) {
    final ArithmeticNode result;
    switch (opToken.getText()) {
        case "+":
            result = new ArithmeticNode(ArithmeticNode.Operation.ADD, leftNode, rightNode);
            break;
        case "-":
            result = new ArithmeticNode(ArithmeticNode.Operation.SUBTRACT, leftNode, rightNode);
            break;
        case "*":
            result = new ArithmeticNode(ArithmeticNode.Operation.MULTIPLY, leftNode, rightNode);
            break;
        case "/":
            result = new ArithmeticNode(ArithmeticNode.Operation.DIVIDE, leftNode, rightNode);
            break;
        case "%":
            result = new ArithmeticNode(ArithmeticNode.Operation.MODULO, leftNode, rightNode);
            break;
        default:
            // should not happen due to lexer
            throw new GrCUDAInternalException("unexpected operation: " + opToken.getText());
    }
    return result;
}
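The operator Token typically comes from a labeled terminal in the grammar (for example op=('*'|'/'|'%')), which makes ANTLR expose a public Token field named op on the generated context. A hypothetical call site from a visitor (the rule, context, and field names are illustrative, not the actual grcuda grammar):

// Hypothetical visitor method; assumes a rule like
//   expr : left=expr op=('*'|'/'|'%') right=expr | ... ;
// so that ctx.op is a Token and ctx.left / ctx.right are sub-contexts.
public ExpressionNode visitBinaryExpr(BinaryExprContext ctx) {
    ExpressionNode left = (ExpressionNode) visit(ctx.left);
    ExpressionNode right = (ExpressionNode) visit(ctx.right);
    return factory.createBinary(ctx.op, left, right); // factory is an assumed NodeFactory field
}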
Example #11
Source File: BatfishANTLRErrorStrategy.java From batfish with Apache License 2.0 | 6 votes |
/**
 * Create an error node with the text of the current line and insert it into parse tree
 *
 * @param recognizer The recognizer with which to create the error node
 * @param separator The token that ends the unrecognized line. This is also used to determine the
 *     index of the line to return in error messages.
 * @return The token contained in the error node
 */
private Token createErrorNode(Parser recognizer, ParserRuleContext ctx, Token separator) {
  if (_recoveredAtEof) {
    _recoveredAtEof = false;
    throw new BatfishRecognitionException(recognizer, recognizer.getInputStream(), ctx);
  }
  if (separator.getType() == Lexer.EOF) {
    _recoveredAtEof = true;
  }
  String lineText = _lines[separator.getLine() - 1] + separator.getText();
  Token lineToken = new UnrecognizedLineToken(lineText, separator.getLine(), _parserStateAtRecovery);
  ErrorNode errorNode = recognizer.createErrorNode(ctx, lineToken);
  ctx.addErrorNode(errorNode);
  return lineToken;
}
Example #12
Source File: GrammarPredicates.java From beakerx with Apache License 2.0 | 6 votes |
public static boolean isClassName(TokenStream _input) {
    try {
        int i = 1;
        Token token = _input.LT(i);
        while (token != null && i < _input.size() && _input.LT(i + 1).getType() == GroovyParser.DOT) {
            i = i + 2;
            token = _input.LT(i);
        }
        if (token == null) return false;
        // TODO here
        return Character.isUpperCase(Character.codePointAt(token.getText(), 0));
    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}
Example #13
Source File: ExecutableCodeDef.java From bookish with MIT License | 6 votes |
public ExecutableCodeDef(ParserRuleContext tree,
                         String inputFilename,
                         int index,
                         Token startOrRefToken,
                         Map<String,String> attributes,
                         String code)
{
    super(index, startOrRefToken);
    this.tree = tree;
    this.inputFilename = inputFilename;
    this.code = expandTabs(code, 4);
    this.attributes = attributes;
    if ( attributes!=null ) {
        this.label = attributes.get("label");
        if ( attributes.containsKey("hide") ) {
            isCodeVisible = !attributes.get("hide").equals("true");
        }
        if ( attributes.containsKey("output") ) {
            displayExpr = attributes.get("output");
        }
    }
}
Example #14
Source File: DocCommentManager.java From zserio with BSD 3-Clause "New" or "Revised" License | 6 votes |
/**
 * Finds appropriate documentation comment which belongs to the given context.
 * Overloaded version for structure field.
 *
 * @param ctx Parser context.
 *
 * @return Parsed documentation comment or null.
 */
public DocComment findDocComment(ZserioParser.StructureFieldDefinitionContext ctx)
{
    // before field alignment
    Token docToken = findDocTokenBefore(ctx.fieldAlignment());
    if (docToken != null)
        return parseDocComment(docToken);

    // before field offset
    docToken = findDocTokenBefore(ctx.fieldOffset());
    if (docToken != null)
        return parseDocComment(docToken);

    // before optional keyword
    docToken = findDocTokenBefore(ctx.OPTIONAL());
    if (docToken != null)
        return parseDocComment(docToken);

    // before field type
    docToken = findDocTokenBefore(ctx.fieldTypeId());
    if (docToken != null)
        return parseDocComment(docToken);

    return null;
}
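The findDocTokenBefore helper is not part of this excerpt; conceptually it inspects the hidden-channel tokens immediately preceding a sub-context and returns a documentation-comment token if one is present. A rough, hypothetical sketch of that idea (tokenStream, the DOC_COMMENT type constant, and the null handling here are assumptions, not the actual zserio implementation):

// Hypothetical sketch: return the doc-comment token sitting on a hidden channel
// just before the given subtree, or null if there is none.
private Token findDocTokenBefore(ParseTree subTree)
{
    if (subTree == null)
        return null;

    final Token first = (subTree instanceof ParserRuleContext)
            ? ((ParserRuleContext)subTree).getStart()
            : ((TerminalNode)subTree).getSymbol();
    // tokenStream is assumed to be the BufferedTokenStream the parser ran on
    final List<Token> hidden = tokenStream.getHiddenTokensToLeft(first.getTokenIndex());
    if (hidden == null)
        return null;

    final Token last = hidden.get(hidden.size() - 1);
    return (last.getType() == DOC_COMMENT) ? last : null; // DOC_COMMENT is an assumed token type
}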
Example #15
Source File: CodeBuffTokenStream.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
public CodeBuffTokenStream(CommonTokenStream stream) {
    super(stream.getTokenSource());
    this.fetchedEOF = false;
    for (Token t : stream.getTokens()) {
        tokens.add(new CommonToken(t));
    }
    reset();
}
Example #16
Source File: ZserioAstBuilder.java From zserio with BSD 3-Clause "New" or "Revised" License | 5 votes |
@Override
public ExternType visitExternType(ZserioParser.ExternTypeContext ctx)
{
    final Token token = ctx.getStart();
    final AstLocation location = new AstLocation(token);

    return new ExternType(location, token.getText());
}
Example #17
Source File: Declaration.java From trygve with GNU General Public License v2.0 | 5 votes |
public MethodDeclaration(final String name, final StaticScope myEnclosedScope, final Type returnType,
        final AccessQualifier accessQualifier, final Token token, final boolean isStatic) {
    super(name);
    signature_ = new MethodSignature(name(), returnType, accessQualifier, token, isStatic);
    isAConstructor_ = calculateConstructorStatus(myEnclosedScope);
    this.commonInit(myEnclosedScope, returnType, accessQualifier, token);
}
Example #18
Source File: AutoSuggester.java From antlr4-autosuggest with Apache License 2.0 | 5 votes |
private Token getAddedToken(String suggestedCompletion) {
    String completedText = this.input + suggestedCompletion;
    List<? extends Token> completedTextTokens = this.lexerWrapper.tokenizeNonDefaultChannel(completedText).tokens;
    if (completedTextTokens.size() <= inputTokens.size()) {
        return null; // Completion didn't yield whole token, could be just a token fragment
    }
    logger.debug("TOKENS IN COMPLETED TEXT: " + completedTextTokens);
    Token newToken = completedTextTokens.get(completedTextTokens.size() - 1);
    return newToken;
}
Example #19
Source File: ZserioAstBuilder.java From zserio with BSD 3-Clause "New" or "Revised" License | 5 votes |
@Override
public FloatType visitFloatType(ZserioParser.FloatTypeContext ctx)
{
    final Token token = ctx.getStart();
    final AstLocation location = new AstLocation(token);

    return new FloatType(location, token.getText(), token.getType());
}
Example #20
Source File: ParseTreeUtils.java From yangtools with Eclipse Public License 1.0 | 5 votes |
static Token verifyToken(final ParseTree parent, final int offset, final int expected) {
    final TerminalNode node = verifyTerminal(parent.getChild(offset));
    final Token ret = node.getSymbol();
    final int type = ret.getType();
    verify(type == expected, "Item %s has type %s, expected %s", node, type, expected);
    return ret;
}
Example #21
Source File: Trainer.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
public static TerminalNode getMatchingLeftSymbol(Corpus corpus, InputDocument doc, TerminalNode node) {
    ParserRuleContext parent = (ParserRuleContext)node.getParent();
    int curTokensParentRuleIndex = parent.getRuleIndex();
    Token curToken = node.getSymbol();
    if (corpus.ruleToPairsBag != null) {
        String ruleName = doc.parser.getRuleNames()[curTokensParentRuleIndex];
        RuleAltKey ruleAltKey = new RuleAltKey(ruleName, parent.getAltNumber());
        List<Pair<Integer, Integer>> pairs = corpus.ruleToPairsBag.get(ruleAltKey);
        if ( pairs!=null ) {
            // Find appropriate pair given current token
            // If more than one pair (a,b) with b=current token pick first one
            // or if a common pair like ({,}), then give that one preference.
            // or if b is punctuation, prefer a that is punct
            List<Integer> viableMatchingLeftTokenTypes = viableLeftTokenTypes(parent, curToken, pairs);
            Vocabulary vocab = doc.parser.getVocabulary();
            if ( !viableMatchingLeftTokenTypes.isEmpty() ) {
                int matchingLeftTokenType =
                    CollectTokenPairs.getMatchingLeftTokenType(curToken, viableMatchingLeftTokenTypes, vocab);
                List<TerminalNode> matchingLeftNodes = parent.getTokens(matchingLeftTokenType);
                // get matching left node by getting last node to left of current token
                List<TerminalNode> nodesToLeftOfCurrentToken =
                    filter(matchingLeftNodes, n -> n.getSymbol().getTokenIndex()<curToken.getTokenIndex());
                TerminalNode matchingLeftNode = nodesToLeftOfCurrentToken.get(nodesToLeftOfCurrentToken.size()-1);
                if (matchingLeftNode == null) {
                    System.err.println("can't find matching node for "+node.getSymbol());
                }
                return matchingLeftNode;
            }
        }
    }
    return null;
}
Example #22
Source File: Pass3Listener.java From trygve with GNU General Public License v2.0 | 5 votes |
@Override protected <ExprType> Expression expressionFromReturnStatement(final ExprType ctxExpr,
        final RuleContext ctxParent, final Token ctxGetStart) {
    Expression expressionReturned = null;
    if (null != ctxExpr) {
        expressionReturned = parsingData_.popExpression();
    }

    final MethodDeclaration methodDecl =
        (MethodDeclaration)findProperMethodScopeAround(ctxExpr, ctxParent, ctxGetStart);
    assert null == methodDecl || methodDecl instanceof MethodDeclaration;

    if (null == methodDecl) {
        expressionReturned = new ErrorExpression(null);
        ErrorLogger.error(ErrorIncidenceType.Fatal, ctxGetStart,
                "Return statement must be within a method scope ", "", "", "");
    } else {
        if (methodDecl.returnType() == null || methodDecl.returnType().name().equals("void")) {
            if (null == expressionReturned || expressionReturned.type().name().equals("void")) {
                ;
            } else {
                ErrorLogger.error(ErrorIncidenceType.Fatal, ctxGetStart,
                        "Return expression `", expressionReturned.getText(), "' of type ",
                        expressionReturned.type().getText(),
                        " is incompatible with method that returns no value.", "");
                expressionReturned = new ErrorExpression(null);
            }
        } else if (methodDecl.returnType().canBeConvertedFrom(expressionReturned.type())) {
            ;
        } else {
            if (expressionReturned.isntError()) {
                ErrorLogger.error(ErrorIncidenceType.Fatal, ctxGetStart,
                        "Return expression `" + expressionReturned.getText(), "`' of type `",
                        expressionReturned.type().getText(),
                        "' is not compatible with declared return type `",
                        methodDecl.returnType().getText(), "'.");
                expressionReturned = new ErrorExpression(expressionReturned);
            }
        }
    }

    return expressionReturned;
}
Example #23
Source File: CiscoXrBaseParser.java From batfish with Apache License 2.0 | 5 votes |
/** Returns {@code true} iff {@code t}'s text represents a valid vlan ID (1-4094) in base 10. */
protected static boolean isVlanId(Token t) {
  try {
    int val = Integer.parseInt(t.getText());
    checkArgument(1 <= val && val <= 4094);
  } catch (IllegalArgumentException e) {
    return false;
  }
  return true;
}
Example #24
Source File: PSTLatexdrawListener.java From latexdraw with GNU General Public License v3.0 | 5 votes |
private void setRectangularShape(final RectangularShape sh, final double x, final double y,
        final double width, final double height, final PSTContext ctx, final Token cmd) {
    sh.setPosition(x, y);
    sh.setWidth(Math.max(0.1, width));
    sh.setHeight(Math.max(0.1, height));
    setShapeParameters(sh, ctx);

    if(ctx.starredCmd(cmd)) {
        setShapeForStar(sh);
    }
}
Example #25
Source File: Declaration.java From trygve with GNU General Public License v2.0 | 5 votes |
public ObjectDeclaration(final String name, final Type type, final Token token) {
    super(name);
    token_ = token;
    lineNumber_ = (null == token_)? 0: token_.getLine();
    type_ = type;
    accessQualifier_ = AccessQualifier.DefaultAccess;
}
Example #26
Source File: Declaration.java From trygve with GNU General Public License v2.0 | 5 votes |
public MethodDeclaration(final MethodSignature signature, final StaticScope myEnclosedScope,
        final Token token) {
    super(signature.name());
    signature_ = signature;
    isAConstructor_ = calculateConstructorStatus(myEnclosedScope);
    this.commonInit(myEnclosedScope, signature.returnType(), signature.accessQualifier(), token);
}
Example #27
Source File: SQLParseError.java From incubator-tajo with Apache License 2.0 | 5 votes |
public SQLParseError(Token offendingToken, int line, int charPositionInLine, String msg, String errorLine) {
    super(msg);
    this.offendingToken = offendingToken;
    this.charPositionInLine = charPositionInLine;
    this.line = line;
    this.errorLine = errorLine;
    this.header = msg;
}
Example #28
Source File: Declaration.java From trygve with GNU General Public License v2.0 | 5 votes |
public void elaborateFromTemplate(final TemplateDeclaration templateDeclaration,
        final TemplateInstantiationInfo newTypes, Token token) {
    templateDeclaration_ = templateDeclaration;

    // This turns a TemplateDeclaration into a class
    assert null == baseClass_ || baseClass_.type() instanceof ClassType;
    final ClassType baseClassType = null == baseClass_? null: (ClassType)baseClass_.type();
    myEnclosedScope_ = new StaticScope(enclosedScope(), "copy",
            templateDeclaration.enclosingScope(), this, newTypes);
    final ClassType newClassType = new ClassType(name(), myEnclosedScope_, baseClassType);
    newClassType.elaborateFromTemplate(templateDeclaration, baseClassType, myEnclosedScope_, this, token);
    this.setType(newClassType);
}
Example #29
Source File: CSSParserVisitorImpl.java From jStyleParser with GNU Lesser General Public License v3.0 | 5 votes |
@Override
/**
 * media_expression : LPAREN S* IDENT S* (COLON S* terms)? RPAREN
 */
public MediaExpression visitMedia_expression(CSSParser.Media_expressionContext ctx) {
    logEnter("mediaexpression: ", ctx);
    if (ctxHasErrorNode(ctx)) {
        mq.invalid = true;
        return null;
    }
    MediaExpression expr = rf.createMediaExpression();
    Declaration decl;
    declaration_stack.push(new declaration_scope());
    declaration_stack.peek().d = decl = rf.createDeclaration();
    declaration_stack.peek().invalid = false;

    String property = extractTextUnescaped(ctx.IDENT().getText());
    decl.setProperty(property);
    Token token = ctx.IDENT().getSymbol();
    decl.setSource(extractSource((CSSToken) token));
    if (ctx.terms() != null) {
        List<Term<?>> t = visitTerms(ctx.terms());
        decl.replaceAll(t);
    }

    if (declaration_stack.peek().d != null && !declaration_stack.peek().invalid) { //if the declaration is valid
        expr.setFeature(decl.getProperty());
        expr.replaceAll(decl);
    }
    declaration_stack.pop();

    logLeave("mediaexpression");
    return expr;
}
Example #30
Source File: Trainer.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
public int[] getFeatures(int i) {
    CodeBuffTokenStream tokens = doc.tokens;
    TerminalNode node = tokenToNodeMap.get(tokens.get(i));
    if ( node==null ) {
        System.err.println("### No node associated with token "+tokens.get(i));
        return null;
    }

    Token curToken = node.getSymbol();
    Token prevToken = tokens.getPreviousRealToken(i);
    Token prevPrevToken = prevToken!=null ? doc.tokens.getPreviousRealToken(prevToken.getTokenIndex()) : null;

    boolean prevTokenStartsLine = false;
    if ( prevToken!=null && prevPrevToken!=null ) {
        prevTokenStartsLine = prevToken.getLine()>prevPrevToken.getLine();
    }

    boolean curTokenStartsNewLine = false;
    if ( prevToken==null ) curTokenStartsNewLine = true; // we must be at start of file
    else if ( curToken.getLine() > prevToken.getLine() ) curTokenStartsNewLine = true;

    int[] features = getContextFeatures(corpus, tokenToNodeMap, doc, i);

    setListInfoFeatures(corpus.tokenToListInfo, features, curToken);

    features[INDEX_PREV_FIRST_ON_LINE] = prevTokenStartsLine ? 1 : 0;
    features[INDEX_FIRST_ON_LINE]      = curTokenStartsNewLine ? 1 : 0;

    return features;
}