Java Code Examples for com.intellij.lexer.Lexer#getTokenStart()

The following examples show how to use com.intellij.lexer.Lexer#getTokenStart(). Each example is taken from an open source project; the originating source file and license are noted above it.
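Before the individual examples, here is a minimal sketch of the iteration pattern they all share: start the lexer over a buffer, then read token boundaries with getTokenStart()/getTokenEnd() until getTokenType() returns null. The class and method names in the sketch are illustrative placeholders, not part of the IntelliJ Platform API.

import com.intellij.lexer.Lexer;
import com.intellij.psi.tree.IElementType;

public final class TokenRangePrinter {
  // Prints each token's type and its [start, end) offsets within the lexed text.
  public static String printTokenRanges(Lexer lexer, CharSequence text) {
    StringBuilder out = new StringBuilder();
    lexer.start(text);                              // position the lexer at offset 0 of the buffer
    IElementType type;
    while ((type = lexer.getTokenType()) != null) { // a null token type marks the end of the buffer
      int start = lexer.getTokenStart();            // inclusive start offset of the current token
      int end = lexer.getTokenEnd();                // exclusive end offset of the current token
      out.append(type).append(": [").append(start).append(", ").append(end).append(")\n");
      lexer.advance();                              // move to the next token
    }
    return out.toString();
  }
}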
Example 1
Source File: LexerTestCase.java    From consulo with Apache License 2.0
protected void checkCorrectRestart(String text) {
  Lexer mainLexer = createLexer();
  String allTokens = printTokens(text, 0, mainLexer);

  Lexer auxLexer = createLexer();
  auxLexer.start(text);
  while (true) {
    IElementType type = auxLexer.getTokenType();
    if (type == null) {
      break;
    }
    if (auxLexer.getState() == 0) {
      int tokenStart = auxLexer.getTokenStart();
      String subTokens = printTokens(text, tokenStart, mainLexer);
      if (!allTokens.endsWith(subTokens)) {
        assertEquals("Restarting impossible from offset " + tokenStart + "; lexer state should not return 0 at this point", allTokens, subTokens);
      }
    }
    auxLexer.advance();
  }
}
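In this test helper, getTokenStart() supplies the offset from which lexing is restarted whenever the auxiliary lexer reports state 0; the check asserts that the tokens produced from that offset form a suffix of the full token stream, i.e. that the lexer can be safely restarted there.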
 
Example 2
Source File: LightLexerTestCase.java    From consulo with Apache License 2.0
@Override
public String transform(String testName, String[] data) throws Exception {
  final StringBuilder output = new StringBuilder();
  Lexer lexer = getLexer();
  final String text = data[0].replaceAll("$(\\n+)", "");
  lexer.start(text);
  while (lexer.getTokenType() != null) {
    final int s = lexer.getTokenStart();
    final int e = lexer.getTokenEnd();
    final IElementType tokenType = lexer.getTokenType();
    final String str = tokenType + ": [" + s + ", " + e + "], {" + text.substring(s, e) + "}\n";
    output.append(str);

    lexer.advance();
  }
  return output.toString();
}
 
Example 3
Source File: SelectWordUtil.java    From consulo with Apache License 2.0
public static void addWordHonoringEscapeSequences(CharSequence editorText,
                                                  TextRange literalTextRange,
                                                  int cursorOffset,
                                                  Lexer lexer,
                                                  List<TextRange> result) {
  lexer.start(editorText, literalTextRange.getStartOffset(), literalTextRange.getEndOffset());

  while (lexer.getTokenType() != null) {
    if (lexer.getTokenStart() <= cursorOffset && cursorOffset < lexer.getTokenEnd()) {
      if (StringEscapesTokenTypes.STRING_LITERAL_ESCAPES.contains(lexer.getTokenType())) {
        result.add(new TextRange(lexer.getTokenStart(), lexer.getTokenEnd()));
      }
      else {
        TextRange word = getWordSelectionRange(editorText, cursorOffset, JAVA_IDENTIFIER_PART_CONDITION);
        if (word != null) {
          result.add(new TextRange(Math.max(word.getStartOffset(), lexer.getTokenStart()),
                                   Math.min(word.getEndOffset(), lexer.getTokenEnd())));
        }
      }
      break;
    }
    lexer.advance();
  }
}
 
Example 4
Source File: UnescapingPsiBuilder.java    From BashSupport with Apache License 2.0
private void initTokenListAndCharSequence(Lexer lexer) {
    lexer.start(processedText);

    myShrunkSequence = new ArrayList<MyShiftedToken>(512); //assume a larger token size by default
    StringBuilder charSequenceBuilder = new StringBuilder();

    int realPos = 0;
    int shrunkPos = 0;
    while (lexer.getTokenType() != null) {
        final IElementType tokenType = lexer.getTokenType();
        final String tokenText = lexer.getTokenText();

        int tokenStart = lexer.getTokenStart();
        int tokenEnd = lexer.getTokenEnd();
        int realLength = tokenEnd - tokenStart;

        int delta = textProcessor.getContentRange().getStartOffset();
        int originalStart = textProcessor.getOffsetInHost(tokenStart - delta);
        int originalEnd = textProcessor.getOffsetInHost(tokenEnd - delta);

        if (textProcessor.containsRange(tokenStart, tokenEnd) && originalStart != -1 && originalEnd != -1) {
            realLength = originalEnd - originalStart;
            int masqueLength = tokenEnd - tokenStart;

            myShrunkSequence.add(new MyShiftedToken(tokenType,
                    realPos, realPos + realLength,
                    shrunkPos, shrunkPos + masqueLength, tokenText));
            charSequenceBuilder.append(tokenText);

            shrunkPos += masqueLength;
        } else {
            myShrunkSequence.add(new MyShiftedToken(tokenType,
                    realPos, realPos + realLength,
                    shrunkPos, shrunkPos + realLength, tokenText));
            charSequenceBuilder.append(tokenText);

            shrunkPos += realLength;
        }

        realPos += realLength;

        lexer.advance();
    }

    myShrunkCharSequence = charSequenceBuilder.toString();
    myShrunkSequenceSize = myShrunkSequence.size();
}
 
Example 5
Source File: ChunkExtractor.java    From consulo with Apache License 2.0
@Nonnull
public TextChunk[] createTextChunks(@Nonnull UsageInfo2UsageAdapter usageInfo2UsageAdapter,
                                    @Nonnull CharSequence chars,
                                    int start,
                                    int end,
                                    boolean selectUsageWithBold,
                                    @Nonnull List<TextChunk> result) {
  final Lexer lexer = myHighlighter.getHighlightingLexer();
  final SyntaxHighlighterOverEditorHighlighter highlighter = myHighlighter;

  LOG.assertTrue(start <= end);

  int i = StringUtil.indexOf(chars, '\n', start, end);
  if (i != -1) end = i;

  if (myDocumentStamp != myDocument.getModificationStamp()) {
    highlighter.restart(chars);
    myDocumentStamp = myDocument.getModificationStamp();
  }
  else if (lexer.getTokenType() == null || lexer.getTokenStart() > start) {
    highlighter.resetPosition(0);  // todo restart from nearest position with initial state
  }

  boolean isBeginning = true;

  for (; lexer.getTokenType() != null; lexer.advance()) {
    int hiStart = lexer.getTokenStart();
    int hiEnd = lexer.getTokenEnd();

    if (hiStart >= end) break;

    hiStart = Math.max(hiStart, start);
    hiEnd = Math.min(hiEnd, end);
    if (hiStart >= hiEnd) {
      continue;
    }

    if (isBeginning) {
      String text = chars.subSequence(hiStart, hiEnd).toString();
      if (text.trim().isEmpty()) continue;
    }
    isBeginning = false;
    IElementType tokenType = lexer.getTokenType();
    TextAttributesKey[] tokenHighlights = highlighter.getTokenHighlights(tokenType);

    processIntersectingRange(usageInfo2UsageAdapter, chars, hiStart, hiEnd, tokenHighlights, selectUsageWithBold, result);
  }

  return result.toArray(new TextChunk[result.size()]);
}
 
Example 6
Source File: FilePackageSetParserExtension.java    From consulo with Apache License 2.0
private static String getTokenText(Lexer lexer) {
  int start = lexer.getTokenStart();
  int end = lexer.getTokenEnd();
  return lexer.getBufferSequence().subSequence(start, end).toString();
}