Java Code Examples for org.antlr.v4.tool.Grammar#defineTokenName()

The following examples show how to use org.antlr.v4.tool.Grammar#defineTokenName().
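Before the project examples, a minimal standalone sketch of calling the method directly may help. It is only an illustration: it assumes the ANTLR 4 tool jar is on the classpath, uses the String-based Grammar constructor that ANTLR ships for testing, and the grammar text, class name, and the EXTRA token name are made up for this sketch.

import org.antlr.v4.tool.Grammar;

public class DefineTokenNameSketch {
	public static void main(String[] args) throws Exception {
		// Build an in-memory grammar; the String constructor throws
		// org.antlr.runtime.RecognitionException if the grammar text fails to parse.
		Grammar g = new Grammar(
			"grammar T;\n" +
			"r  : ID ;\n" +
			"ID : [a-z]+ ;\n");

		// defineTokenName(String) assigns a token type to a name the grammar
		// does not know yet and returns that type.
		int extra = g.defineTokenName("EXTRA");
		System.out.println("EXTRA -> " + extra);

		// Asking for the same name again returns the already assigned type
		// rather than allocating a new one.
		System.out.println("EXTRA again -> " + g.defineTokenName("EXTRA"));
	}
}

The two examples below show how ANTLR's own SemanticPipeline uses the same method while assigning token types during grammar analysis.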
Example 1
Source File: SemanticPipeline.java    From codebuff with BSD 2-Clause "Simplified" License
void assignTokenTypes(Grammar g, List<GrammarAST> tokensDefs,
					  List<GrammarAST> tokenIDs, List<GrammarAST> terminals)
{
	//Grammar G = g.getOutermostGrammar(); // put in root, even if imported

	// create token types for tokens { A, B, C } ALIASES
	for (GrammarAST alias : tokensDefs) {
		if (g.getTokenType(alias.getText()) != Token.INVALID_TYPE) {
			g.tool.errMgr.grammarError(ErrorType.TOKEN_NAME_REASSIGNMENT, g.fileName, alias.token, alias.getText());
		}

		g.defineTokenName(alias.getText());
	}

	// DEFINE TOKEN TYPES FOR TOKEN REFS LIKE ID, INT
	for (GrammarAST idAST : tokenIDs) {
		if (g.getTokenType(idAST.getText()) == Token.INVALID_TYPE) {
			g.tool.errMgr.grammarError(ErrorType.IMPLICIT_TOKEN_DEFINITION, g.fileName, idAST.token, idAST.getText());
		}

		g.defineTokenName(idAST.getText());
	}

	// VERIFY TOKEN TYPES FOR STRING LITERAL REFS LIKE 'while', ';'
	for (GrammarAST termAST : terminals) {
		if (termAST.getType() != ANTLRParser.STRING_LITERAL) {
			continue;
		}

		if (g.getTokenType(termAST.getText()) == Token.INVALID_TYPE) {
			g.tool.errMgr.grammarError(ErrorType.IMPLICIT_STRING_DEFINITION, g.fileName, termAST.token, termAST.getText());
		}
	}

	g.tool.log("semantics", "tokens="+g.tokenNameToTypeMap);
       g.tool.log("semantics", "strings="+g.stringLiteralToTypeMap);
}
 
Example 2
Source File: SemanticPipeline.java    From codebuff with BSD 2-Clause "Simplified" License
void assignLexerTokenTypes(Grammar g, List<GrammarAST> tokensDefs) {
	Grammar G = g.getOutermostGrammar(); // put in root, even if imported
	for (GrammarAST def : tokensDefs) {
		// tokens { id (',' id)* } so must check IDs not TOKEN_REF
		if ( Grammar.isTokenName(def.getText()) ) {
			G.defineTokenName(def.getText());
		}
	}

	/* Define token types for nonfragment rules which do not include a 'type(...)'
	 * or 'more' lexer command.
	 */
	for (Rule r : g.rules.values()) {
		if ( !r.isFragment() && !hasTypeOrMoreCommand(r) ) {
			G.defineTokenName(r.name);
		}
	}

	// FOR ALL X : 'xxx'; RULES, DEFINE 'xxx' AS TYPE X
	List<Pair<GrammarAST,GrammarAST>> litAliases =
		Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
	Set<String> conflictingLiterals = new HashSet<String>();
	if ( litAliases!=null ) {
		for (Pair<GrammarAST,GrammarAST> pair : litAliases) {
			GrammarAST nameAST = pair.a;
			GrammarAST litAST = pair.b;
			if ( !G.stringLiteralToTypeMap.containsKey(litAST.getText()) ) {
				G.defineTokenAlias(nameAST.getText(), litAST.getText());
			}
			else {
				// oops two literal defs in two rules (within or across modes).
				conflictingLiterals.add(litAST.getText());
			}
		}
		for (String lit : conflictingLiterals) {
			// Remove literal if repeated across rules so it's not
			// found by parser grammar.
			Integer value = G.stringLiteralToTypeMap.remove(lit);
			if (value != null && value > 0 && value < G.typeToStringLiteralList.size() && lit.equals(G.typeToStringLiteralList.get(value))) {
				G.typeToStringLiteralList.set(value, null);
			}
		}
	}
}