Java Code Examples for com.intellij.lexer.Lexer#advance()

The following examples show how to use com.intellij.lexer.Lexer#advance(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: SoyLexer.java    From bamboo-soy with Apache License 2.0 6 votes vote down vote up
@Override
public MergeFunction getMergeFunction() {
  // Collapse a run of OTHER/WHITE_SPACE tokens into a single token.
  // The merged token is OTHER if any OTHER token appears in the run,
  // otherwise it remains WHITE_SPACE.
  return ((final IElementType type, final Lexer originalLexer) -> {
    if (type != SoyTypes.OTHER && type != TokenType.WHITE_SPACE) {
      // Not a mergeable token: pass it through unchanged.
      return type;
    }
    IElementType merged = type;
    IElementType next = originalLexer.getTokenType();
    while (next == SoyTypes.OTHER || next == TokenType.WHITE_SPACE) {
      if (next == SoyTypes.OTHER) {
        merged = SoyTypes.OTHER;
      }
      originalLexer.advance();
      next = originalLexer.getTokenType();
    }
    return merged;
  });
}
 
Example 2
Source File: MergingLexer.java    From BashSupport with Apache License 2.0 6 votes vote down vote up
@Override
public IElementType merge(IElementType type, Lexer lexer) {
    // Find the first tuple whose merge set contains the current token,
    // consume every following token from that set, and report the tuple's
    // target type instead of the raw tokens.
    for (MergeTuple tuple : mergeTuples) {
        TokenSet mergeSet = tuple.getTokensToMerge();
        if (!mergeSet.contains(type)) {
            continue;
        }
        while (mergeSet.contains(lexer.getTokenType())) {
            lexer.advance();
        }
        return tuple.getTargetType();
    }

    // No tuple matched: the token passes through unchanged.
    return type;
}
 
Example 3
Source File: HaxeProjectStructureDetector.java    From intellij-haxe with Apache License 2.0 6 votes vote down vote up
/**
 * Reads a dot-separated qualified name (e.g. {@code com.foo.Bar}) from the
 * lexer, optionally terminated by a trailing {@code *} when allowStar is true.
 *
 * @return the name read so far, or null if it ends with a dangling '.'
 */
@Nullable
static String readQualifiedName(final CharSequence charSequence, final Lexer lexer, boolean allowStar) {
  final StringBuilder buffer = StringBuilderSpinAllocator.alloc();
  try {
    while (true) {
      // BUG FIX: the original condition used "!= HaxeTokenTypes.OMUL", which
      // both rejected the '*' it was meant to allow and accepted arbitrary
      // non-ID tokens whenever allowStar was set. Accept only identifiers,
      // plus '*' when allowStar is true.
      if (lexer.getTokenType() != HaxeTokenTypes.ID && !(allowStar && lexer.getTokenType() == HaxeTokenTypes.OMUL)) break;
      buffer.append(charSequence, lexer.getTokenStart(), lexer.getTokenEnd());
      if (lexer.getTokenType() == HaxeTokenTypes.OMUL) break; // '*' terminates the name
      lexer.advance();
      if (lexer.getTokenType() != HaxeTokenTypes.ODOT) break; // no dot => name complete
      buffer.append('.');
      lexer.advance();
    }
    String packageName = buffer.toString();
    // A trailing '.' means the qualified name was malformed.
    if (StringUtil.endsWithChar(packageName, '.')) return null;
    return packageName;
  }
  finally {
    StringBuilderSpinAllocator.dispose(buffer);
  }
}
 
Example 4
Source File: XQueryLexerTest.java    From intellij-xquery with Apache License 2.0 6 votes vote down vote up
/**
 * Asserts that the lexer produces exactly the expected token stream.
 * expectedTokens holds (type, text) pairs laid out flat:
 * [type0, text0, type1, text1, ...].
 */
private static void assertProducedTokens(String text, String[] expectedTokens, Lexer lexer) {
    lexer.start(text);
    int idx = 0;
    while (lexer.getTokenType() != null) {
        // BUG FIX: guard both array reads below; the original checked only
        // "idx" and threw ArrayIndexOutOfBoundsException for odd-length
        // expectation arrays instead of failing with a clear message.
        if (idx + 1 >= expectedTokens.length) fail("Too many tokens");
        String tokenName = lexer.getTokenType().toString();
        String expectedTokenType = expectedTokens[idx++];
        String expectedTokenText = expectedTokens[idx++];
        assertEquals(expectedTokenType, tokenName);
        String tokenText = lexer.getBufferSequence().subSequence(lexer.getTokenStart(),
                lexer.getTokenEnd()).toString();
        assertEquals(expectedTokenText, tokenText);
        lexer.advance();
    }

    if (idx < expectedTokens.length) fail("Not enough tokens");
}
 
Example 5
Source File: LexerTestCase.java    From consulo with Apache License 2.0 6 votes vote down vote up
protected void checkCorrectRestart(String text) {
  // Lex the full text once to get the reference token dump.
  Lexer mainLexer = createLexer();
  String allTokens = printTokens(text, 0, mainLexer);

  // Walk the text with a second lexer; every position where it reports
  // state 0 must be a valid restart point, i.e. lexing from there must
  // reproduce exactly the tail of the reference dump.
  Lexer auxLexer = createLexer();
  auxLexer.start(text);
  for (; auxLexer.getTokenType() != null; auxLexer.advance()) {
    if (auxLexer.getState() != 0) {
      continue;
    }
    int tokenStart = auxLexer.getTokenStart();
    String subTokens = printTokens(text, tokenStart, mainLexer);
    if (!allTokens.endsWith(subTokens)) {
      assertEquals("Restarting impossible from offset " + tokenStart + "; lexer state should not return 0 at this point", allTokens, subTokens);
    }
  }
}
 
Example 6
Source File: LexerTestCase.java    From consulo with Apache License 2.0 6 votes vote down vote up
/**
 * Lexes {@code text} starting at {@code start} and renders every token as
 * {@code TYPE ('text')\n}, concatenated in stream order.
 */
public static String printTokens(CharSequence text, int start, Lexer lexer) {
  lexer.start(text, start, text.length());
  // PERF FIX: use a StringBuilder instead of repeated String concatenation,
  // which is quadratic in the number of tokens.
  StringBuilder result = new StringBuilder();
  IElementType tokenType;
  while ((tokenType = lexer.getTokenType()) != null) {
    result.append(tokenType).append(" ('").append(getTokenText(lexer)).append("')\n");
    lexer.advance();
  }
  return result.toString();
}
 
Example 7
Source File: LightLexerTestCase.java    From consulo with Apache License 2.0 6 votes vote down vote up
@Override
public String transform(String testName, String[] data) throws Exception {
  // Lexes the first data entry and dumps one line per token in the form
  // "TYPE: [start, end], {text}".
  final StringBuilder output = new StringBuilder();
  Lexer lexer = getLexer();
  final String text = data[0].replaceAll("$(\\n+)", "");
  lexer.start(text);
  for (IElementType tokenType = lexer.getTokenType(); tokenType != null; tokenType = lexer.getTokenType()) {
    final int start = lexer.getTokenStart();
    final int end = lexer.getTokenEnd();
    output.append(tokenType)
        .append(": [").append(start)
        .append(", ").append(end)
        .append("], {").append(text.substring(start, end))
        .append("}\n");
    lexer.advance();
  }
  return output.toString();
}
 
Example 8
Source File: ConsoleViewUtil.java    From consulo with Apache License 2.0 6 votes vote down vote up
public static void printWithHighlighting(@Nonnull ConsoleView console, @Nonnull String text, @Nonnull SyntaxHighlighter highlighter, Runnable doOnNewLine) {
  // Runs the highlighting lexer over the text and prints each token to the
  // console with its mapped content type; doOnNewLine (if non-null) runs
  // after every newline printed.
  Lexer lexer = highlighter.getHighlightingLexer();
  lexer.start(text, 0, text.length(), 0);

  for (IElementType tokenType = lexer.getTokenType(); tokenType != null; tokenType = lexer.getTokenType()) {
    ConsoleViewContentType contentType = getContentTypeForToken(tokenType, highlighter);
    // returnDelims=true keeps the "\n" separators as tokens so line breaks
    // are printed and the new-line callback can fire for each of them.
    StringTokenizer eolTokenizer = new StringTokenizer(lexer.getTokenText(), "\n", true);
    while (eolTokenizer.hasMoreTokens()) {
      String chunk = eolTokenizer.nextToken();
      console.print(chunk, contentType);
      if (doOnNewLine != null && "\n".equals(chunk)) {
        doOnNewLine.run();
      }
    }

    lexer.advance();
  }
}
 
Example 9
Source File: SelectWordUtil.java    From consulo with Apache License 2.0 6 votes vote down vote up
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  // Walks the literal's tokens until the one containing the cursor is found,
  // then records either the whole escape-sequence token or the word
  // fragment clipped to that token's bounds.
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());

  while (lexer.getTokenType() != null) {
    int tokenStart = lexer.getTokenStart();
    int tokenEnd = lexer.getTokenEnd();
    if (tokenStart > cursorOffset || cursorOffset >= tokenEnd) {
      // Cursor not inside this token; keep scanning.
      lexer.advance();
      continue;
    }
    if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
      // Cursor is inside an escape sequence: select the entire escape.
      result.add(new TextRange(tokenStart, tokenEnd));
    }
    else {
      TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
      if (word != null) {
        // Clip the word selection to the current token's range.
        result.add(new TextRange(Math.max(word.getStartOffset(), tokenStart),
                                 Math.min(word.getEndOffset(), tokenEnd)));
      }
    }
    return;
  }
}
 
Example 10
Source File: Assert.java    From intellij-latte with MIT License 5 votes vote down vote up
/**
 * Asserts that the lexer produces exactly the given sequence of
 * (token type, token text) pairs, in order.
 *
 * @param lexer          lexer already positioned at its first token
 * @param expectedTokens the expected (type, text) pairs
 */
public static void assertTokens(Lexer lexer, Pair<IElementType, String>[] expectedTokens) {
	int index = 0;
	while (lexer.getTokenType() != null) {
		if (index == expectedTokens.length) fail("Too many tokens from lexer; unexpected " + lexer.getTokenType());
		Pair<IElementType, String> expected = expectedTokens[index];
		assertEquals("Wrong token type at index " + index, expected.first, lexer.getTokenType());
		assertEquals("Wrong token text at index " + index, expected.second, lexer.getTokenText());
		lexer.advance();
		index++;
	}
	if (index < expectedTokens.length) fail("Not enough tokens from lexer; expected " + expectedTokens[index].first + " but got nothing.");
}
 
Example 11
Source File: BaseFilterLexerUtil.java    From consulo with Apache License 2.0 5 votes vote down vote up
public static ScanContent scanContent(FileContent content, IdAndToDoScannerBasedOnFilterLexer indexer) {
  // Reuse (and clear) a cached result if a previous pass stored one.
  ScanContent cached = content.getUserData(scanContentKey);
  if (cached != null) {
    content.putUserData(scanContentKey, null);
    return cached;
  }

  final boolean needTodo = content.getFile().getFileSystem() instanceof LocalFileSystem;
  final boolean needIdIndex = IdTableBuilding.getFileTypeIndexer(content.getFileType()) instanceof LexerBasedIdIndexer;

  final IdDataConsumer consumer = needIdIndex ? new IdDataConsumer() : null;
  final OccurrenceConsumer todoOccurrenceConsumer = new OccurrenceConsumer(consumer, needTodo);
  final Lexer filterLexer = indexer.createLexer(todoOccurrenceConsumer);

  // Drive the filter lexer over the whole file; it records ids and todo
  // occurrences as a side effect of tokenizing.
  filterLexer.start(content.getContentAsText());
  while (filterLexer.getTokenType() != null) {
    filterLexer.advance();
  }

  Map<TodoIndexEntry, Integer> todoMap = null;
  if (needTodo) {
    for (IndexPattern indexPattern : IndexPatternUtil.getIndexPatterns()) {
      final int count = todoOccurrenceConsumer.getOccurrenceCount(indexPattern);
      if (count > 0) {
        if (todoMap == null) todoMap = new THashMap<TodoIndexEntry, Integer>();
        todoMap.put(new TodoIndexEntry(indexPattern.getPatternString(), indexPattern.isCaseSensitive()), count);
      }
    }
  }

  ScanContent data = new ScanContent(
    consumer != null ? consumer.getResult() : Collections.<IdIndexEntry, Integer>emptyMap(),
    todoMap != null ? todoMap : Collections.<TodoIndexEntry, Integer>emptyMap()
  );
  // Cache only when both consumers ran, so the other index can reuse this pass.
  if (needIdIndex && needTodo) content.putUserData(scanContentKey, data);
  return data;
}
 
Example 12
Source File: HaxeProjectStructureDetector.java    From intellij-haxe with Apache License 2.0 5 votes vote down vote up
@Nullable
public static String readPackageName(final CharSequence charSequence, final Lexer lexer) {
  // A file without a leading "package" keyword is in the default package.
  skipWhiteSpaceAndComments(lexer);
  if (!HaxeTokenTypes.KPACKAGE.equals(lexer.getTokenType())) {
    return "";
  }
  // Consume the keyword, then read the dotted name that follows it.
  lexer.advance();
  skipWhiteSpaceAndComments(lexer);
  return readQualifiedName(charSequence, lexer, false);
}
 
Example 13
Source File: LanguageUtil.java    From consulo with Apache License 2.0 5 votes vote down vote up
@Nonnull
@RequiredReadAction
public static ParserDefinition.SpaceRequirements canStickTokensTogetherByLexer(ASTNode left, ASTNode right, Lexer lexer) {
  // Re-lex the concatenated text of both nodes. If the lexer reproduces the
  // same two tokens with the same boundaries, no separating space is needed.
  String combined = left.getText() + right.getText();
  lexer.start(combined, 0, combined.length());

  boolean leftIntact = lexer.getTokenType() == left.getElementType()
                       && lexer.getTokenEnd() == left.getTextLength();
  if (!leftIntact) return ParserDefinition.SpaceRequirements.MUST;

  lexer.advance();
  boolean rightIntact = lexer.getTokenEnd() == combined.length()
                        && lexer.getTokenType() == right.getElementType();
  return rightIntact ? ParserDefinition.SpaceRequirements.MAY : ParserDefinition.SpaceRequirements.MUST;
}
 
Example 14
Source File: FilePackageSetParserExtension.java    From consulo with Apache License 2.0 4 votes vote down vote up
/**
 * Consumes pattern tokens (identifiers, '/', '*', '.', ' ', '-', '~', '#')
 * and builds the textual file pattern. Two adjacent identifiers are a
 * syntax error; parsing stops at the first non-pattern token.
 *
 * @throws ParsingException via error() on malformed or empty patterns
 */
private static String parseFilePattern(Lexer lexer) throws ParsingException {
  // StringBuilder: local use only, no need for StringBuffer's synchronization.
  StringBuilder pattern = new StringBuilder();
  boolean wasIdentifier = false;
  while (true) {
    // Hoisted: the original queried lexer.getTokenType() up to nine times
    // per iteration.
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == ScopeTokenTypes.DIV) {
      wasIdentifier = false;
      pattern.append("/");
    }
    else if (tokenType == ScopeTokenTypes.IDENTIFIER || tokenType == ScopeTokenTypes.INTEGER_LITERAL) {
      // Two identifiers in a row (without a separator) are invalid.
      if (wasIdentifier) error(lexer, AnalysisScopeBundle.message("error.packageset.token.expectations", getTokenText(lexer)));
      wasIdentifier = tokenType == ScopeTokenTypes.IDENTIFIER;
      pattern.append(getTokenText(lexer));
    }
    else if (tokenType == ScopeTokenTypes.ASTERISK) {
      wasIdentifier = false;
      pattern.append("*");
    }
    else if (tokenType == ScopeTokenTypes.DOT) {
      wasIdentifier = false;
      pattern.append(".");
    }
    else if (tokenType == ScopeTokenTypes.WHITE_SPACE) {
      wasIdentifier = false;
      pattern.append(" ");
    }
    else if (tokenType == ScopeTokenTypes.MINUS) {
      wasIdentifier = false;
      pattern.append("-");
    }
    else if (tokenType == ScopeTokenTypes.TILDE) {
      wasIdentifier = false;
      pattern.append("~");
    }
    else if (tokenType == ScopeTokenTypes.SHARP) {
      wasIdentifier = false;
      pattern.append("#");
    }
    else {
      break;
    }
    lexer.advance();
  }

  if (pattern.length() == 0) {
    error(lexer, AnalysisScopeBundle.message("error.packageset.pattern.expectations"));
  }

  return pattern.toString();
}
 
Example 15
Source File: MacroParser.java    From consulo with Apache License 2.0 4 votes vote down vote up
// Advances the lexer by one token, then skips any whitespace that follows,
// leaving the lexer positioned on the next significant token.
private static void advance(Lexer lexer) {
  lexer.advance();
  skipWhitespaces(lexer);
}
 
Example 16
Source File: EnterHandler.java    From consulo with Apache License 2.0 4 votes vote down vote up
/**
 * Checks whether a block or documentation comment is properly terminated,
 * i.e. its closing sequence really ends the comment rather than sitting
 * inside a string literal or belonging to a following comment.
 *
 * @param comment   the comment element to check
 * @param commenter describes the comment prefixes/suffixes for the language
 * @param editor    editor used to obtain the file's quote handler
 * @return true if the comment is complete
 */
public static boolean isCommentComplete(PsiComment comment, CodeDocumentationAwareCommenter commenter, Editor editor) {
  // Let registered extensions take over the decision for comments they handle.
  for (CommentCompleteHandler handler : CommentCompleteHandler.EP_NAME.getExtensionList()) {
    if (handler.isApplicable(comment, commenter)) {
      return handler.isCommentComplete(comment, commenter, editor);
    }
  }

  String commentText = comment.getText();
  final boolean docComment = isDocComment(comment, commenter);
  final String expectedCommentEnd = docComment ? commenter.getDocumentationCommentSuffix() : commenter.getBlockCommentSuffix();
  // Fast path: the text must at least end with the expected suffix.
  if (!commentText.endsWith(expectedCommentEnd)) return false;

  final PsiFile containingFile = comment.getContainingFile();
  final Language language = containingFile.getLanguage();
  ParserDefinition parserDefinition = LanguageParserDefinitions.INSTANCE.forLanguage(language);
  if (parserDefinition == null) {
    return true;
  }
  // Re-lex the comment body (skipping its prefix) to see how the lexer
  // actually tokenizes the closing sequence.
  Lexer lexer = parserDefinition.createLexer(containingFile.getLanguageVersion());
  final String commentPrefix = docComment ? commenter.getDocumentationCommentPrefix() : commenter.getBlockCommentPrefix();
  lexer.start(commentText, commentPrefix == null ? 0 : commentPrefix.length(), commentText.length());
  QuoteHandler fileTypeHandler = TypedHandler.getQuoteHandler(containingFile, editor);
  JavaLikeQuoteHandler javaLikeQuoteHandler = fileTypeHandler instanceof JavaLikeQuoteHandler ? (JavaLikeQuoteHandler)fileTypeHandler : null;

  while (true) {
    IElementType tokenType = lexer.getTokenType();
    if (tokenType == null) {
      return false;
    }

    // The suffix may sit inside a string literal token; treat it as a real
    // terminator only when the file continues with a newline after it.
    if (javaLikeQuoteHandler != null && javaLikeQuoteHandler.getStringTokenTypes() != null && javaLikeQuoteHandler.getStringTokenTypes().contains(tokenType)) {
      String text = commentText.substring(lexer.getTokenStart(), lexer.getTokenEnd());
      int endOffset = comment.getTextRange().getEndOffset();

      if (text.endsWith(expectedCommentEnd) && endOffset < containingFile.getTextLength() && containingFile.getText().charAt(endOffset) == '\n') {
        return true;
      }
    }
    // A second comment token inside means the original comment was closed earlier.
    if (tokenType == commenter.getDocumentationCommentTokenType() || tokenType == commenter.getBlockCommentTokenType()) {
      return false;
    }
    if (tokenType == commenter.getLineCommentTokenType() && lexer.getTokenText().contains(commentPrefix)) {
      return false;
    }
    if (lexer.getTokenEnd() == commentText.length()) {
      if (tokenType == commenter.getLineCommentTokenType()) {
        // The tail was swallowed by a line comment: restart lexing just past
        // the line-comment prefix and keep scanning.
        String prefix = commenter.getLineCommentPrefix();
        lexer.start(commentText, lexer.getTokenStart() + (prefix == null ? 0 : prefix.length()), commentText.length());
        lexer.advance();
        continue;
      }
      else if (isInvalidPsi(comment)) {
        return false;
      }
      return true;
    }
    lexer.advance();
  }
}
 
Example 17
Source File: ChunkExtractor.java    From consulo with Apache License 2.0 4 votes vote down vote up
/**
 * Splits the [start, end) range of {@code chars} into highlighted text
 * chunks using the syntax highlighter's lexer, appending them to
 * {@code result} and returning the accumulated array.
 */
@Nonnull
public TextChunk[] createTextChunks(@Nonnull UsageInfo2UsageAdapter usageInfo2UsageAdapter,
                                    @Nonnull CharSequence chars,
                                    int start,
                                    int end,
                                    boolean selectUsageWithBold,
                                    @Nonnull List<TextChunk> result) {
  final Lexer lexer = myHighlighter.getHighlightingLexer();
  final SyntaxHighlighterOverEditorHighlighter highlighter = myHighlighter;

  LOG.assertTrue(start <= end);

  // Only process up to the first line break inside the requested range.
  int i = StringUtil.indexOf(chars, '\n', start, end);
  if (i != -1) end = i;

  if (myDocumentStamp != myDocument.getModificationStamp()) {
    // The document changed since the last call: re-lex from scratch.
    highlighter.restart(chars);
    myDocumentStamp = myDocument.getModificationStamp();
  }
  else if (lexer.getTokenType() == null || lexer.getTokenStart() > start) {
    highlighter.resetPosition(0);  // todo restart from nearest position with initial state
  }

  boolean isBeginning = true;

  for (; lexer.getTokenType() != null; lexer.advance()) {
    int hiStart = lexer.getTokenStart();
    int hiEnd = lexer.getTokenEnd();

    if (hiStart >= end) break;

    // Clip the token to the requested range; skip tokens entirely outside it.
    hiStart = Math.max(hiStart, start);
    hiEnd = Math.min(hiEnd, end);
    if (hiStart >= hiEnd) {
      continue;
    }

    // Skip leading whitespace-only tokens at the beginning of the range.
    if (isBeginning) {
      String text = chars.subSequence(hiStart, hiEnd).toString();
      if (text.trim().isEmpty()) continue;
    }
    isBeginning = false;
    IElementType tokenType = lexer.getTokenType();
    TextAttributesKey[] tokenHighlights = highlighter.getTokenHighlights(tokenType);

    processIntersectingRange(usageInfo2UsageAdapter, chars, hiStart, hiEnd, tokenHighlights, selectUsageWithBold, result);
  }

  return result.toArray(new TextChunk[result.size()]);
}
 
Example 18
Source File: HaxeProjectStructureDetector.java    From intellij-haxe with Apache License 2.0 4 votes vote down vote up
public static void skipWhiteSpaceAndComments(Lexer lexer) {
  // Consume every consecutive comment or whitespace token, leaving the
  // lexer positioned on the next significant token (or at EOF).
  IElementType tokenType = lexer.getTokenType();
  while (HaxeTokenTypeSets.COMMENTS.contains(tokenType) || HaxeTokenTypeSets.WHITESPACES.contains(tokenType)) {
    lexer.advance();
    tokenType = lexer.getTokenType();
  }
}
 
Example 19
Source File: CSharpLexer.java    From consulo-csharp with Apache License 2.0 4 votes vote down vote up
/**
 * Collapses a run of identical mergeable tokens into a single token of the
 * same type. Line doc comments separated by a single whitespace token are
 * treated as one run, so consecutive doc lines merge into one comment.
 */
@Override
public IElementType merge(final IElementType mergeToken, final Lexer originalLexer)
{
	// Tokens outside the merge set pass through unchanged.
	if(!ourMergeSet.contains(mergeToken))
	{
		return mergeToken;
	}

	while(true)
	{
		IElementType currentToken = originalLexer.getTokenType();
		if(currentToken == null)
		{
			break;
		}

		// we need merge two docs if one line between
		if(mergeToken == CSharpTokensImpl.LINE_DOC_COMMENT && currentToken == CSharpTokens.WHITE_SPACE)
		{
			// Peek one token ahead, then restore the lexer position so the
			// whitespace is only consumed if the doc comment run continues.
			LexerPosition currentPosition = originalLexer.getCurrentPosition();
			originalLexer.advance();
			boolean docIsNext = originalLexer.getTokenType() == CSharpTokensImpl.LINE_DOC_COMMENT;
			originalLexer.restore(currentPosition);
			if(docIsNext)
			{
				// Treat the whitespace as part of the doc comment run.
				currentToken = CSharpTokensImpl.LINE_DOC_COMMENT;
			}
			else
			{
				break;
			}
		}

		// A different token type ends the run.
		if(currentToken != mergeToken)
		{
			break;
		}

		originalLexer.advance();
	}
	return mergeToken;
}
 
Example 20
Source File: UnescapingPsiBuilder.java    From BashSupport with Apache License 2.0 4 votes vote down vote up
// Lexes the processed (unescaped) text and records, for every token, its
// offsets both in the original host text ("real") and in the shrunk,
// processed text, while also building the concatenated shrunk char sequence.
private void initTokenListAndCharSequence(Lexer lexer) {
    lexer.start(processedText);

    myShrunkSequence = new ArrayList<MyShiftedToken>(512); //assume a larger token size by default
    StringBuilder charSequenceBuilder = new StringBuilder();

    int realPos = 0;
    int shrunkPos = 0;
    while (lexer.getTokenType() != null) {
        final IElementType tokenType = lexer.getTokenType();
        final String tokenText = lexer.getTokenText();

        int tokenStart = lexer.getTokenStart();
        int tokenEnd = lexer.getTokenEnd();
        int realLength = tokenEnd - tokenStart;

        // Map the token's offsets back into the host text.
        int delta = textProcessor.getContentRange().getStartOffset();
        int originalStart = textProcessor.getOffsetInHost(tokenStart - delta);
        int originalEnd = textProcessor.getOffsetInHost(tokenEnd - delta);

        if (textProcessor.containsRange(tokenStart, tokenEnd) && originalStart != -1 && originalEnd != -1) {
            // Token lies inside the processed range: its real (host) length
            // may differ from its processed ("masqueraded") length because
            // of unescaping.
            realLength = originalEnd - originalStart;
            int masqueLength = tokenEnd - tokenStart;

            myShrunkSequence.add(new MyShiftedToken(tokenType,
                    realPos, realPos + realLength,
                    shrunkPos, shrunkPos + masqueLength, tokenText));
            charSequenceBuilder.append(tokenText);

            shrunkPos += masqueLength;
        } else {
            // Token outside the processed range: real and shrunk lengths match.
            myShrunkSequence.add(new MyShiftedToken(tokenType,
                    realPos, realPos + realLength,
                    shrunkPos, shrunkPos + realLength, tokenText));
            charSequenceBuilder.append(tokenText);

            shrunkPos += realLength;
        }

        realPos += realLength;

        lexer.advance();
    }

    myShrunkCharSequence = charSequenceBuilder.toString();
    myShrunkSequenceSize = myShrunkSequence.size();
}