Java Code Examples for org.netbeans.api.lexer.TokenSequence#moveNext()

The following examples show how to use org.netbeans.api.lexer.TokenSequence#moveNext(). You can go to the original project or source file by following the links above each example.
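Most of the examples below share the same positioning idiom: TokenSequence#move(int) places the sequence just before the token that contains or follows the given offset, and TokenSequence#moveNext() (falling back to movePrevious() at the end of input) must return true before token() or offset() may be read. A minimal sketch of that idiom, assuming a document whose Language has been registered and which the caller has read-locked:

TokenHierarchy<?> th = TokenHierarchy.get(doc);
TokenSequence<?> ts = th.tokenSequence();
if (ts != null) {
    ts.move(offset);                           // position just before the token at/after offset
    if (ts.moveNext() || ts.movePrevious()) {  // moveNext() returns false past the last token
        Token<?> token = ts.token();           // token containing or adjacent to offset
        int tokenStart = ts.offset();          // start offset of that token in the document
    }
}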
Example 1
Source File: XMLSyntaxSupport.java    From netbeans with Apache License 2.0
/**
 * Executes user code on token sequence from the document.
 * Read-locks the document, obtains {@link TokenSequence} from the Lexer and executes {@code userCode} 
 * passing the initialized sequence. The sequence is moved to the desired offset and the token that contains
 * or starts at that position. The client can move the sequence elsewhere.
 * <p/>
 * If the {@code userCode} calls this {@code SyntaxSupport}'s methods such as {@link #getNextToken(int)}, they will use
 * the <b>same TokenSequence</b> as passed to {@code userCode}. This allows combining navigation calls from {@link XMLSyntaxSupport}
 * with client's own sequence movements. The TokenSequence instance passed to {@code userCode} can be queried for
 * current token offset after navigation.
 * 
 * @param <T>
 * @param offset offset to position the sequence at
 * @param userCode code to execute
 * @return user-defined value
 * @throws BadLocationException if the user code throws BadLocationException
 * @throws IllegalStateException if the user code throws a checked exception
 */
@NullUnknown
public <T> T runWithSequence(int offset, SequenceCallable<T> userCode) throws BadLocationException {
    T result;
    TokenSequence old = null;
    try {
        ((AbstractDocument)document).readLock();
        old = cachedSequence.get();
        cachedSequence.remove();
        TokenSequence ts = getSequence();
        if (ts == null) {
            throw new BadLocationException("No sequence for position", offset); // NOI18N
        }
        cachedSequence.set(ts);
        synchronized (ts) {
            ts.move(offset);
            ts.moveNext();
            result = userCode.call(ts);
        }
    } finally {
        cachedSequence.set(old);
        ((AbstractDocument)document).readUnlock();
    }
    return result;
}
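A hypothetical usage sketch (the names support and caretOffset are illustrative, and SequenceCallable is assumed to be usable as a lambda): the callback receives the sequence already positioned on the token at the caret, and the checked BadLocationException must be handled by the caller.

try {
    Token<?> tokenAtCaret = support.runWithSequence(caretOffset,
            (TokenSequence ts) -> ts.token());   // ts has already been moved to caretOffset
    // ... inspect tokenAtCaret ...
} catch (BadLocationException ex) {
    // no token sequence could be obtained for the document
}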
 
Example 2
Source File: AddPropertyCodeGenerator.java    From netbeans with Apache License 2.0
private static int scanForSemicolon(Document doc, int[] offset, int start, int end) throws BadLocationException {
    TokenHierarchy<Document> th = doc != null ? TokenHierarchy.get(doc) : null;
    List<TokenSequence<?>> embeddedSequences = th != null ? th.embeddedTokenSequences(offset[0], false) : null;
    TokenSequence<?> seq = embeddedSequences != null ? embeddedSequences.get(embeddedSequences.size() - 1) : null;

    if (seq == null) {
        return offset[0];
    }   

    seq.move(start);

    int semicolon = -1;
    while(seq.moveNext()) {
        int tokenOffset = seq.offset();
        if(tokenOffset > end) {
            break;
        }
        Token<?> t = seq.token();
        if(t != null && t.id() == JavaTokenId.SEMICOLON ) {
            semicolon = tokenOffset;
            break;
        }
    }
    return semicolon;
}
 
Example 3
Source File: FindLocalUsagesQuery.java    From netbeans with Apache License 2.0
@Override
public Void visitCompilationUnit(CompilationUnitTree node, Void p) {
    if (!searchComment) {
        return super.visitCompilationUnit(node, p);
    }

    if (toFind.getKind() == ElementKind.PARAMETER) {
        renameParameterInMethodComments(toFind);
    } else {
        String originalName = toFind.getSimpleName().toString();
        if (originalName!=null) {
            TokenSequence<JavaTokenId> ts = info.getTokenHierarchy().tokenSequence(JavaTokenId.language());

            while (ts.moveNext()) {
                Token<JavaTokenId> t = ts.token();
                
                if (isComment(t)) {
                    findAllInComment(t.text().toString(), ts.offset(), originalName);
                }
            }
        }
    }
    return super.visitCompilationUnit(node, p);
}
 
Example 4
Source File: ElementResultItem.java    From netbeans with Apache License 2.0
@Override
protected int removeTextLength(JTextComponent component, int offset, int removeLength) {
    if (removeLength <= 0) {
        return super.removeTextLength(component, offset, removeLength);
    }
    TokenSequence s = createTokenSequence(component);
    s.move(offset);
    s.moveNext();
    if (s.token().id() == XMLTokenId.TAG || s.token().id() == XMLTokenId.TEXT) {
        // replace entire tag, minus starting >
        if (s.token().text().toString().startsWith(CompletionUtil.TAG_FIRST_CHAR)) {
            return s.token().length() - (offset - s.offset());
        }
    }
    return super.removeTextLength(component, offset, removeLength);
}
 
Example 5
Source File: LexUtilities.java    From netbeans with Apache License 2.0
@SuppressWarnings("unchecked")
private static TokenSequence<GroovyTokenId> findRhtmlDelimited(TokenSequence t, int offset) {
    if (t.language().mimeType().equals("text/x-gsp")) {
        t.move(offset);
        if (t.moveNext() && t.token() != null && "groovy-delimiter".equals(t.token().id().primaryCategory())) { // NOI18N
            // It's a delimiter - move ahead and see if we find it
            if (t.moveNext() && t.token() != null && "groovy".equals(t.token().id().primaryCategory())) { // NOI18N
                TokenSequence<?> ets = t.embedded();
                if (ets != null) {
                    return (TokenSequence<GroovyTokenId>) ets;
                }
            }
        }
    }
    return null;
}
 
Example 6
Source File: JavadocCompletionUtils.java    From netbeans with Apache License 2.0
public static boolean isJavadocContext(TokenHierarchy hierarchy, int offset) {
    TokenSequence<JavaTokenId> ts = SourceUtils.getJavaTokenSequence(hierarchy, offset);
    if (!movedToJavadocToken(ts, offset)) {
        return false;
    }
    
    TokenSequence<JavadocTokenId> jdts = ts.embedded(JavadocTokenId.language());
    if (jdts == null) {
        return false;
    } else if (jdts.isEmpty()) {
        return isEmptyJavadoc(ts.token(), offset - ts.offset());
    }
    
    jdts.move(offset);
    if (!jdts.moveNext() && !jdts.movePrevious()) {
        return false;
    }
    
    // this checks /** and */ headers
    return isInsideToken(jdts, offset) && !isInsideIndent(jdts.token(), offset - jdts.offset());
}
 
Example 7
Source File: LexerUtils.java    From netbeans with Apache License 2.0
/**
 * Gets instance of {@link TokenSequence} for the given
 * {@link TokenHierarchy} and offset.
 *
 * @since 1.55
 * @param th token hierarchy to search
 * @param offset document offset to position the sequences at
 * @param language the requested embedded {@link Language}
 * @param joined whether joined embedded token sequences are requested
 * @return a {@link TokenSequence} of the given language at the offset, or {@code null} if none is found
 */
public static TokenSequence getTokenSequence(TokenHierarchy th, int offset, Language language, boolean joined) {
    TokenSequence ts = th.tokenSequence();
    if (ts == null) {
        return null;
    }
    ts.move(offset);

    while (ts.moveNext() || ts.movePrevious()) {
        if (ts.language() == language) {
            return ts;
        }

        ts = ts.embeddedJoined();

        if (ts == null) {
            break;
        }

        //position the embedded ts so we can search deeper
        ts.move(offset);
    }

    return null;

}
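A hypothetical call sketch (the names document and caretOffset, and the CSS token id class, are illustrative): the helper walks down through embedded token sequences until one of the requested language is found, otherwise it returns null.

TokenHierarchy<?> th = TokenHierarchy.get(document);
TokenSequence cssTs = LexerUtils.getTokenSequence(th, caretOffset, CssTokenId.language(), true);
if (cssTs != null) {
    cssTs.move(caretOffset);
    if (cssTs.moveNext() || cssTs.movePrevious()) {
        // cssTs.token() is a token of the embedded CSS language at the caret
    }
}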
 
Example 8
Source File: TagBasedLexerFormatter.java    From netbeans with Apache License 2.0
private boolean indexWithinCurrentLanguage(BaseDocument doc, int index) throws BadLocationException{
    TokenHierarchy tokenHierarchy = TokenHierarchy.get(doc);
    TokenSequence[] tokenSequences = (TokenSequence[]) tokenHierarchy.tokenSequenceList(supportedLanguagePath(), 0, Integer.MAX_VALUE).toArray(new TokenSequence[0]);
    
    for (TokenSequence tokenSequence: tokenSequences){
        TextBounds languageBounds = findTokenSequenceBounds(doc, tokenSequence);
        
        if (languageBounds.getAbsoluteStart() <= index && languageBounds.getAbsoluteEnd() >= index){
            tokenSequence.move(index);
            
            if (tokenSequence.moveNext()){
                // the newly entered \n character may or may not
                // form a separate token - work around it
                if (isWSToken(tokenSequence.token())){
                    tokenSequence.movePrevious(); 
                }
                
                return tokenSequence.embedded() == null && !isWSToken(tokenSequence.token());
            }
        }
    }
    
    return false;
}
 
Example 9
Source File: CompletionProviderImpl.java    From netbeans with Apache License 2.0
private static TokenSequence getDeepestTokenSequence (
    TokenHierarchy  tokenHierarchy,
    int             offset
) {
    TokenSequence tokenSequence = tokenHierarchy.tokenSequence ();
    while(tokenSequence != null) {
        tokenSequence.move(offset - 1);
        if(!tokenSequence.moveNext()) {
            break;
        }
        TokenSequence ts = tokenSequence.embedded();
        if(ts == null) {
            return tokenSequence;
        } else {
            tokenSequence = ts;
        }
    }
    return tokenSequence;
}
 
Example 10
Source File: JsCamelCaseInterceptor.java    From netbeans with Apache License 2.0
protected static int getWordOffset(Document doc, int offset, boolean reverse) {
    TokenSequence<? extends JsTokenId> ts = LexUtilities.getJsTokenSequence(doc, offset);
    if (ts == null) {
        return -1;
    }
    ts.move(offset);
    if (!ts.moveNext() && !ts.movePrevious()) {
        return -1;
    }
    if (reverse && ts.offset() == offset && !ts.movePrevious()) {
        return -1;
    }

    Token<? extends JsTokenId> token = ts.token();

    if (token.id() == JsTokenId.WHITESPACE) {
        // Eat up spaces
        if ((reverse && ts.offset() < offset) || (!reverse && ts.offset() > offset)) {
            return ts.offset();
        }
    }

    if (token.id() == JsTokenId.IDENTIFIER) {
        String image = token.text().toString();
        int imageLength = image.length();
        int offsetInImage = offset - ts.offset();

        if (reverse) {
            return getPreviousIdentifierWordOffset(ts, image, imageLength, offsetInImage);
        } else {
            return getNextIdentifierWordOffset(ts, image, imageLength, offsetInImage);
        }
    }

    return -1;
}
 
Example 11
Source File: GspBracesMatcher.java    From netbeans with Apache License 2.0
/**
 * Search forwards in the token sequence until a corresponding token of type
 * <code>closingTokenID</code> is found. It doesn't matter whether there are
 * other pairs of the same open/close token IDs in between: the method counts
 * the nested pairs and finds the matching one.
 *
 * @param tokenSequence sequence that we are iterating over
 * @param openingTokenID tokenID which opens the pair
 * @param closingTokenID tokenID which closes the pair
 * @param independentTokenID tokenID which closes the independent tag
 * @return the closing token, or {@code null} if none is found or the task is canceled
 */
private Token<GspTokenId> findClosingToken(
        TokenSequence<GspTokenId> tokenSequence,
        TokenId openingTokenID,
        TokenId closingTokenID,
        TokenId independentTokenID) {
    int balance = 0;

    while (tokenSequence.moveNext()) {
        if (MatcherContext.isTaskCanceled()) {
            return null;
        }

        Token<GspTokenId> token = tokenSequence.token();
        TokenId tokenID = token.id();

        if (tokenID == openingTokenID) {
            balance++;
        } else if (tokenID == independentTokenID) {
            balance--;
        } else if (tokenID == closingTokenID) {
            if (balance == 0) {
                return token;
            }
            balance--;
        }
    }

    return null;
}
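A hypothetical invocation sketch (the concrete GspTokenId constants and openingTokenOffset are illustrative; the real ids depend on the GSP lexer): the brace matcher first positions the sequence on the opening token, so the forward scan starts immediately after it.

tokenSequence.move(openingTokenOffset);
if (tokenSequence.moveNext()) {
    Token<GspTokenId> closing = findClosingToken(tokenSequence,
            GspTokenId.START_TAG,        // illustrative opening id
            GspTokenId.END_TAG,          // illustrative closing id
            GspTokenId.INDEPENDENT_TAG); // illustrative independent id
    // closing is null when no match is found or the matching task was canceled
}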
 
Example 12
Source File: CreateInterceptor.java    From netbeans with Apache License 2.0
private String findTheName(JsObject configObject) {
    String name = null;
    FileObject fo = configObject.getFileObject();
    Source source = Source.create(fo);
    TokenHierarchy<?> th = source.createSnapshot().getTokenHierarchy();
    if (th == null) {
        return null;
    }
    TokenSequence<? extends JsTokenId> ts = LexUtilities.getJsTokenSequence(th, configObject.getOffset());
    Token<? extends JsTokenId> token;
    if (ts == null) {
        return null;
    }
    ts.move(configObject.getOffset());
    if (!ts.moveNext()) {
        return null;
    }
    token = LexUtilities.findPreviousToken(ts, Arrays.asList(JsTokenId.IDENTIFIER));
    while (!(token.id() == JsTokenId.IDENTIFIER && CLASS.equals(token.text().toString())) && ts.movePrevious()) {
        token = ts.token();
    }

    if (token.id() == JsTokenId.IDENTIFIER && CLASS.equals(token.text().toString()) && ts.movePrevious()) {
        // we are at ^Class.create(...)
        // now we need to find = and the identifier after =
        List<JsTokenId> skipTokens = Arrays.asList(JsTokenId.WHITESPACE, JsTokenId.EOL, JsTokenId.BLOCK_COMMENT, JsTokenId.LINE_COMMENT);
        token = LexUtilities.findPrevious(ts, skipTokens);
        if (token.id() == JsTokenId.OPERATOR_ASSIGNMENT && ts.movePrevious()) {
            token = LexUtilities.findPrevious(ts, skipTokens);
            if (token.id() == JsTokenId.IDENTIFIER) {
                name = token.text().toString();
            }
        }

    }
    return name;
}
 
Example 13
Source File: CssTypedTextInterceptor.java    From netbeans with Apache License 2.0
private static boolean findHtmlValueToken(TokenHierarchy<?> hi, int offset, boolean backward) {
    List<TokenSequence<?>> embeddedTokenSequences = hi.embeddedTokenSequences(offset, backward);
    for (TokenSequence<?> htmlts : embeddedTokenSequences) {
        if (htmlts.language().mimeType().equals("text/html")) {
            //it really looks like our parent ts
            int ediff = htmlts.move(offset);
            if (ediff == 0 && backward && htmlts.movePrevious() || htmlts.moveNext()) {
                TokenId id = htmlts.token().id();
                //XXX !!!! DEPENDENCY to HtmlTokenId !!!!
                return id.name().equals("VALUE_CSS");
            }
        }
    }
    return false;
}
 
Example 14
Source File: JavadocCompletionQuery.java    From netbeans with Apache License 2.0
private void analyzeContext(JavadocContext jdctx) {
    TokenSequence<JavadocTokenId> jdts = jdctx.jdts;
    if (jdts == null) {
        return;
    }
    
    jdts.move(this.caretOffset);
    if (!jdts.moveNext() && !jdts.movePrevious()) {
        // XXX solve /***/
        // provide block tags, inline tags, html
        return;
    }
    
    if (this.caretOffset - jdts.offset() == 0) {
        // if position in token == 0 resolve CC according to previous token
        jdts.movePrevious();
    }
    
    switch (jdts.token().id()) {
        case TAG:
            resolveTagToken(jdctx);
            break;
        case IDENT:
            resolveIdent(jdctx);
            break;
        case DOT:
            resolveDotToken(jdctx);
            break;
        case HASH:
            resolveHashToken(jdctx);
            break;
        case OTHER_TEXT:
            resolveOtherText(jdctx, jdts);
            break;
        case HTML_TAG:
            resolveHTMLToken(jdctx);
            break;
    }
}
 
Example 15
Source File: JavaElementFoldVisitor.java    From netbeans with Apache License 2.0
@Override
@SuppressWarnings("fallthrough")
public Object visitCompilationUnit(CompilationUnitTree node, Object p) {
    int importsStart = Integer.MAX_VALUE;
    int importsEnd   = -1;

    TokenHierarchy<?> th = info.getTokenHierarchy();
    TokenSequence<JavaTokenId>  ts = th.tokenSequence(JavaTokenId.language());
    
    IMP: for (ImportTree imp : node.getImports()) {
        // eliminate invalid imports; erroneous imports at the start/end of the import block
        // will not be folded.
        Tree qualIdent = imp.getQualifiedIdentifier();
        if (qualIdent == null) {
            continue;
        }
        while (qualIdent.getKind() == Tree.Kind.MEMBER_SELECT) {
            MemberSelectTree mst = (MemberSelectTree)qualIdent;
            if (mst.getIdentifier().contentEquals("<error>")) { // NOI18N
                // ignore erroneous imports
                continue IMP;
            }
            qualIdent = mst.getExpression();
        }

        // don't rely on Javac for the end position: in case of missing semicolon the import consumes all whitespace
        // including comments / javadocs up to the following declaration or text. Rather scan tokens and consume only the import
        // identifier + semi.
        int start = (int) sp.getStartPosition(cu, imp);
        int identPos = (int) sp.getStartPosition(cu, qualIdent);
        int end = identPos;
        boolean firstNewline = true;
        ts.move(identPos);
        IDENT: while (ts.moveNext()) {
            Token<JavaTokenId>  tukac = ts.token();
            switch (tukac.id()) {
                case IDENTIFIER:
                case DOT:
                case STAR:
                    firstNewline = false;
                    end = ts.offset() + tukac.length();
                    break;
                case SEMICOLON:
                    end = (int) sp.getEndPosition(cu, imp);
                    break IDENT;
                case WHITESPACE: {
                    if (firstNewline) {
                        int endl = tukac.text().toString().indexOf("\n"); // NOI18N
                        if (endl > -1) {
                            // remember the first newline after some ident/star/dot content
                            end = ts.offset() + endl; 
                            firstNewline = true;
                        }
                    }
                    // fall through
                }
                case LINE_COMMENT: case BLOCK_COMMENT: 
                    continue;
                default:
                    break IDENT;
            }
        }

        if (importsStart > start)
            importsStart = start;

        if (end > importsEnd) {
            importsEnd = end;
        }
    }

    if (importsEnd != (-1) && importsStart != (-1)) {
        if (importsStart < initialCommentStopPos) {
            initialCommentStopPos = importsStart;
        }
        importsStart += 7/*"import ".length()*/;

        if (importsStart < importsEnd) {
            addFold(creator.createImportsFold(importsStart, importsEnd), importsStart);
        }
    }
    return super.visitCompilationUnit(node, p);
}
 
Example 16
Source File: ContextAnalyzer.java    From netbeans with Apache License 2.0
public static TreePath validateSelection(CompilationInfo ci, int start, int end, Set<TypeKind> ignoredTypes) {
    if(start == end) {
        TokenSequence<JavaTokenId> cts = ci.getTokenHierarchy().tokenSequence(JavaTokenId.language());

        if (cts != null) {
            cts.move(start);

            if (cts.moveNext() && cts.token().id() != JavaTokenId.WHITESPACE && cts.offset() == start) {
                start = end += 1;
            }
        }
    }
    
    TreePath tp = ci.getTreeUtilities().pathFor(start == end? start : (start + end) / 2 + 1);

    for ( ; tp != null; tp = tp.getParentPath()) {
        Tree leaf = tp.getLeaf();

        if (   !ExpressionTree.class.isAssignableFrom(leaf.getKind().asInterface())
            && (leaf.getKind() != Tree.Kind.VARIABLE || ((VariableTree) leaf).getInitializer() == null)) {
            continue;
        }

        long treeStart = ci.getTrees().getSourcePositions().getStartPosition(ci.getCompilationUnit(), leaf);
        long treeEnd   = ci.getTrees().getSourcePositions().getEndPosition(ci.getCompilationUnit(), leaf);

        if (start != end) {
            if (treeStart != start || treeEnd != end) {
                continue;
            }
        } else {
            if (treeStart != start && treeEnd != end) {
                continue;
            } 
        }

        TypeMirror type = ci.getTrees().getTypeMirror(tp);

        if (type != null && type.getKind() == TypeKind.ERROR) {
            type = ci.getTrees().getOriginalType((ErrorType) type);
        }

        if (type == null || ignoredTypes.contains(type.getKind()))
            continue;

        if(tp.getLeaf().getKind() == Tree.Kind.ASSIGNMENT)
            continue;

        if (tp.getLeaf().getKind() == Tree.Kind.ANNOTATION)
            continue;

        if (!isInsideClass(tp))
            return null;

        TreePath candidate = tp;

        tp = tp.getParentPath();

        while (tp != null) {
            switch (tp.getLeaf().getKind()) {
                case VARIABLE:
                    VariableTree vt = (VariableTree) tp.getLeaf();
                    if (vt.getInitializer() == leaf) {
                        return candidate;
                    } else {
                        return null;
                    }
                case NEW_CLASS:
                    NewClassTree nct = (NewClassTree) tp.getLeaf();
                    
                    if (nct.getIdentifier().equals(candidate.getLeaf())) { // avoid disabling the hint, e.g. inside an anonymous class higher in the tree path
                        for (Tree p : nct.getArguments()) {
                            if (p == leaf) {
                                return candidate;
                            }
                        }

                        return null;
                    }
            }

            leaf = tp.getLeaf();
            tp = tp.getParentPath();
        }

        return candidate;
    }

    return null;
}
 
Example 17
Source File: TokenListUpdaterTest.java    From netbeans with Apache License 2.0
public void testRemoveUnfinishedLexingZeroLookaheadToken() throws Exception {
    Document doc = new ModificationTextDocument();
    // Assign a language to the document
    String text = "a+b";
    doc.insertString(0, text, null);

    doc.putProperty(Language.class,TestTokenId.language());
    TokenHierarchy<?> hi = TokenHierarchy.get(doc);
    ((AbstractDocument)doc).readLock();
    TokenSequence<?> ts;
    try {
        ts = hi.tokenSequence();
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.IDENTIFIER, "a", 0);
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.PLUS, "+", 1);
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
    
    // Remove "+"
    doc.remove(1, 1);
    ((AbstractDocument)doc).readLock();
    try {
        ts.moveNext();
        fail("Should not get there");
    } catch (ConcurrentModificationException e) {
        // Expected
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }

    ((AbstractDocument)doc).readLock();
    try {
        ts = hi.tokenSequence();
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.IDENTIFIER, "ab", 0);
        // Extra ending '\n' of the document returned by DocumentUtilities.getText(doc) and lexed
        assertTrue(ts.moveNext());
        LexerTestUtilities.assertTokenEquals(ts,TestTokenId.WHITESPACE, "\n", 2);
        assertFalse(ts.moveNext());
    } finally {
        ((AbstractDocument)doc).readUnlock();
    }
}
 
Example 18
Source File: SanitizingParser.java    From netbeans with Apache License 2.0
/**
 * This method tries to analyze the text and decides whether the snapshot should be parsed.
 * @param snapshot
 * @return whether the snapshot should be parsed
 */
private boolean isParsable (Snapshot snapshot) {
    FileObject fo = snapshot.getSource().getFileObject();
    boolean isEmbeded = !getMimeType().equals(snapshot.getMimePath().getPath());
    Long size;
    CharSequence text = snapshot.getText();
    String scriptName;
    scriptName = (fo != null) ? snapshot.getSource().getFileObject().getNameExt() : getDefaultScriptName();
    size = (fo != null && !isEmbeded) ? fo.getSize() : (long)text.length();

    if (!PARSE_BIG_FILES) {
        if (size > MAX_FILE_SIZE_TO_PARSE) {
            if (LOGGER.isLoggable(Level.FINE)) {
                LOGGER.log(Level.FINE, "The file {0} was not parsed because the size is too big.", scriptName);
            }
            return false;
        }

        if (size > MAX_MINIMIZE_FILE_SIZE_TO_PARSE && !(snapshot.getMimeType().equals(JsTokenId.JSON_MIME_TYPE) || snapshot.getMimeType().equals(JsTokenId.PACKAGE_JSON_MIME_TYPE)||snapshot.getMimeType().equals(JsTokenId.BOWER_JSON_MIME_TYPE))) {
            // only try to detect minified content for files bigger than 1/3 of the max size
            boolean isMinified = false;
            TokenSequence<? extends JsTokenId> ts = LexUtilities.getTokenSequence(snapshot, 0, language);
            if (ts != null) {
                int offset = 0;
                int countedLines = 0;
                int countChars = 0;
                while (!isMinified && ts.moveNext() && countedLines < 5) {
                    LexUtilities.findNext(ts, Arrays.asList(JsTokenId.DOC_COMMENT, JsTokenId.BLOCK_COMMENT, JsTokenId.LINE_COMMENT, JsTokenId.EOL, JsTokenId.WHITESPACE));
                    offset = ts.offset();
                    LexUtilities.findNextToken(ts, Arrays.asList(JsTokenId.EOL));
                    countChars += (ts.offset() - offset);
                    countedLines++;
                }
                if (countedLines > 0 && (countChars / countedLines) > 200) {
                    if (LOGGER.isLoggable(Level.FINE)) {
                        LOGGER.log(Level.FINE, "The file {0} was not parsed because it is minimized and the size is too big.", scriptName);
                    }
                    return false;
                }
            }
        } else if (snapshot.getMimeType().equals(JsTokenId.JSON_MIME_TYPE)) {
            int index = text.length() - 1;
            if (index < 0) {
                // NETBEANS-2881
                return false;
            }
            char ch = text.charAt(index);
            while (index > 0 && ch != '}') {
                index--;
                ch = text.charAt(index);
            }
            int count = 0;
            while (index > 0 && ch == '}' && count <= MAX_RIGHT_CURLY_BRACKETS) {
                index--;
                count++;
                ch = text.charAt(index);
                
            }
            if (count >= MAX_RIGHT_CURLY_BRACKETS) {   // See issue 247274
                return false;
            }
        }
    }
    return true;
}
 
Example 19
Source File: ValueResultItem.java    From netbeans with Apache License 2.0
@Override
protected int removeTextLength(JTextComponent component, int offset, int removeLength) {
    TokenSequence<XMLTokenId> s = createTokenSequence(component);
    s.move(offset);
    if (!s.moveNext()) {
        return super.removeTextLength(component, offset, removeLength);
    }
    TokenId id = s.token().id();
    if (id == XMLTokenId.VALUE) {
        // check up to the end of the tag that there's no error, e.g. an unterminated attribute
        int off = s.offset();
        String t = s.token().text().toString();
        char c = t.charAt(t.length() - 1);
        int len = t.length();
        boolean error = false;
        
        L: if (s.moveNext()) {
            XMLTokenId tid = s.token().id();
            if (tid != XMLTokenId.ARGUMENT && tid != XMLTokenId.TAG) {
                error = true;
                // only replace up to and excluding the first whitespace:
                for (int i = 0; i < len; i++) {
                    if (Character.isWhitespace(t.charAt(i))) {
                        len = i;
                        break L;
                    }
                }
            }
        }
        
        int l = off + t.length() - offset;
        if (t.isEmpty()) {
            return 0;
        }
        if (c == '\'' || c == '"') {
            return t.length() == -1 ? 0 : l - 1;
        } else {
            return 0;
        }
    }
    return super.removeTextLength(component, offset, removeLength);
}
 
Example 20
Source File: TagParser.java    From netbeans with Apache License 2.0
private static Result parseTags(TokenSequence<JavaTokenId> ts) {
    while (ts.moveNext()) {
        if (ts.token().id() == JavaTokenId.BLOCK_COMMENT || ts.token().id() == JavaTokenId.JAVADOC_COMMENT) {
            String text = ts.token().text().toString();

            if (text.contains("@test")) {
                List<Tag> tags = new ArrayList<>();
                int start = -1;
                int end = -1;
                int tagStart = -1;
                int tagEnd = -1;

                text = text.substring(0, text.length() - 2);

                String tagName = null;
                StringBuilder tagText = new StringBuilder();
                int prefix = ts.token().id() == JavaTokenId.BLOCK_COMMENT ? 2 : 3;
                String[] lines = text.substring(prefix).split("\n");
                int pos = ts.offset() + prefix;

                for (String line : lines) {
                    if (line.replaceAll("[*\\s]+", "").isEmpty()) {
                        pos += line.length() + 1;
                        continue;
                    }
                    Matcher m = TAG_PATTERN.matcher(line);
                    if (m.find()) {
                        if (tagName != null) {
                            tags.add(new Tag(start, pos, tagStart, tagEnd, tagName, tagText.toString()));
                            tagText.delete(0, tagText.length());
                        }

                        tagName = m.group(1);

                        start = pos;
                        tagStart = pos + m.start();
                        tagEnd = pos + m.end(1);
                        tagText.append(line.substring(m.end(1)));
                    } else if (tagName != null) {
                        int asterisk = line.indexOf('*');
                        tagText.append(line.substring(asterisk + 1));
                    }

                    pos += line.length() + 1;

                    if (tagName != null) {
                        end = pos;
                    }
                }

                if (tagName != null) {
                    tags.add(new Tag(start, end, tagStart, tagEnd, tagName, tagText.toString()));
                }

                Map<String, List<Tag>> result = new HashMap<>();

                for (Tag tag : tags) {
                    List<Tag> innerTags = result.get(tag.getName());

                    if (innerTags == null) {
                        result.put(tag.getName(), innerTags = new ArrayList<>());
                    }

                    innerTags.add(tag);
                }

                return new Result(tags, result);
            }
        }
    }

    return new Result(Collections.<Tag>emptyList(), Collections.<String, List<Tag>>emptyMap());
}