Python sqlparse.sql.Comment() Examples

The following are 22 code examples of sqlparse.sql.Comment(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sqlparse.sql, or try the search function.
Example #1
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0 7 votes vote down vote up
def group_identifier_list(tlist):
    """Group comma-separated identifier-like tokens into a sql.IdentifierList."""
    role_keywords = T.Keyword, ('null', 'role')
    item_classes = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
                    sql.IdentifierList, sql.Operation)
    item_ttypes = (T_NUMERICAL + T_STRING + T_NAME +
                   (T.Keyword, T.Comment, T.Wildcard))

    def is_separator(token):
        # Bare commas delimit list items.
        return token.match(T.Punctuation, ',')

    def is_item(token):
        # A token qualifies as a list item by instance, match or ttype.
        return imt(token, i=item_classes, m=role_keywords, t=item_ttypes)

    def extend_bounds(tlist, pidx, tidx, nidx):
        # The group spans from the previous valid token to the next one.
        return pidx, nidx

    _group(tlist, sql.IdentifierList, is_separator,
           is_item, is_item, extend_bounds, extend=True)
Example #2
Source File: others.py    From SublimeText-SQLTools with GNU General Public License v3.0 6 votes vote down vote up
def _process(tlist):
        """Remove comment tokens from *tlist* in place.

        A comment squeezed between two non-whitespace, non-parenthesis
        tokens is replaced with a single space so the neighbours do not
        fuse together; otherwise it is simply deleted.
        """
        def get_next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        tidx, token = get_next_comment()
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev_ is None or next_ is None or
                    prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
                    next_.is_whitespace or next_.match(T.Punctuation, ')')):
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

            # Re-scan from the start; indices shifted after the mutation.
            tidx, token = get_next_comment()
Example #3
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def process(self, stack, stream):
        """Yield only the tokens whose type belongs to the Comment family."""
        for ttype, text in stream:
            if ttype not in Comment:
                continue
            yield ttype, text
Example #4
Source File: sql.py    From django-perf-rec with MIT License 5 votes vote down vote up
def sql_recursively_strip(node):
    """Strip redundant whitespace from *node* and all of its sublists.

    Comment nodes themselves are left untouched; parenthesised groups are
    additionally trimmed just inside both parentheses.
    """
    for child in node.get_sublists():
        sql_recursively_strip(child)

    # Keep comment contents exactly as written.
    if isinstance(node, Comment):
        return node

    sql_strip(node)

    # Strip duplicate whitespace just inside the parentheses.
    if isinstance(node, Parenthesis):
        sql_trim(node, 1)
        sql_trim(node, -2)

    return node
Example #5
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def process(self, _, stmt):
        """Replace plain line comments with the filter's own Comment class
        and flag whether each one trails other tokens on the same line.
        """
        def custom_flaten(token):
            """
                Flatten like ``flatten()`` but do not descend into comments.
            """
            if isinstance(token, sql.TokenList) and not tu.is_comment(token):
                for tkn in token.tokens:
                    for item in custom_flaten(tkn):
                        yield item
            else:
                yield token
        is_prev_cr = True  # True while positioned at the start of a line
        for token in custom_flaten(stmt):
            if tu.is_plain_line_comment(token, self.local_config.comment_syntax):
                # Swap the token for the filter-specific Comment class.
                parent = token.parent
                index = parent.tokens.index(token)
                comment = LineDescriptionLineCommentFilter.Comment(token.tokens)
                for tkn in token.tokens:
                    tkn.parent = comment
                comment.parent = parent
                parent.tokens[index] = comment
                # A comment preceded by non-newline tokens describes that line.
                comment.is_line_description = not is_prev_cr # pylint: disable=attribute-defined-outside-init
            elif token.is_whitespace():
                if is_inc_cr(token):
                    is_prev_cr = True
            else:
                is_prev_cr = False
Example #6
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def is_comment(token):
    """Return True when *token* is a grouped sqlparse Comment."""
    return isinstance(token, sql.Comment)
Example #7
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def is_line_comment(token):
    """Return True when *token* is a line comment rather than a block comment."""
    if not is_comment(token):
        return False
    first = token.token_next_by_type(0, T.Comment)
    # Block comments start with a "/*" or "*/" marker token.
    return first.value not in ("/*", "*/")
Example #8
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def is_block_comment(token):
    """Return True when *token* is a block (``/* ... */``) comment."""
    if not is_comment(token):
        return False
    first = token.token_next_by_type(0, T.Comment)
    # Block comments start with a "/*" or "*/" marker token.
    return first.value in ("/*", "*/")
Example #9
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def align_comments(tlist):
    """
        Merge each comment into the preceding token group so the comment
        stays attached to the tokens it annotates.
    """
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        # Token immediately before the comment.
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            # Move everything after `before`, up to and including the
            # comment, into `before`'s own token list.
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            # Nothing to attach to; continue scanning after the comment.
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment)
Example #10
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _get_next_comment(self, tlist):
        """Return the next comment in *tlist*, grouped or plain, else None."""
        # TODO(andi) Comment types should be unified, see related issue38
        found = tlist.token_next_by_instance(0, sql.Comment)
        if found is None:
            found = tlist.token_next_by_type(0, T.Comment)
        return found
Example #11
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def process(self, stack, stream):
        """Drop comment tokens, passing every other token through unchanged."""
        for ttype, text in stream:
            if ttype in Comment:
                continue
            yield ttype, text
Example #12
Source File: sql_handler.py    From mysql_streamer with Apache License 2.0 5 votes vote down vote up
def tokens(self):
        """Return the statement's tokens minus whitespace and comments."""
        meaningful = []
        for tok in self.statement.tokens:
            if tok.is_whitespace() or isinstance(tok, Comment):
                continue
            meaningful.append(tok)
        return meaningful
Example #13
Source File: grouping.py    From codenn with MIT License 5 votes vote down vote up
def align_comments(tlist):
    """
        Merge each comment into the preceding token group so the comment
        stays attached to the tokens it annotates.
    """
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        # Token immediately before the comment.
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            # Move everything after `before`, up to and including the
            # comment, into `before`'s own token list.
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            # Nothing to attach to; continue scanning after the comment.
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment)
Example #14
Source File: filters.py    From codenn with MIT License 5 votes vote down vote up
def _get_next_comment(self, tlist):
        """Return the next comment in *tlist*, grouped or plain, else None."""
        # TODO(andi) Comment types should be unified, see related issue38
        found = tlist.token_next_by_instance(0, sql.Comment)
        if found is None:
            found = tlist.token_next_by_type(0, T.Comment)
        return found
Example #15
Source File: filters.py    From codenn with MIT License 5 votes vote down vote up
def process(self, stack, stream):
        """Drop comment tokens, passing every other token through unchanged."""
        for ttype, text in stream:
            if ttype in Comment:
                continue
            yield ttype, text
Example #16
Source File: filters.py    From codenn with MIT License 5 votes vote down vote up
def process(self, stack, stream):
        """Yield only the tokens whose type belongs to the Comment family."""
        for ttype, text in stream:
            if ttype not in Comment:
                continue
            yield ttype, text
Example #17
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0 5 votes vote down vote up
def align_comments(tlist):
    """Fold each comment into the token group that precedes it, if any."""
    cidx, comment = tlist.token_next_by(i=sql.Comment)
    while comment:
        bidx, before = tlist.token_prev(cidx)
        if isinstance(before, sql.TokenList):
            # Absorb the comment into the preceding group.
            tlist.group_tokens(sql.TokenList, bidx, cidx, extend=True)
            cidx = bidx
        cidx, comment = tlist.token_next_by(i=sql.Comment, idx=cidx)
Example #18
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0 5 votes vote down vote up
def group_comments(tlist):
    """Collect runs of comment tokens (plus interior whitespace) into sql.Comment groups."""
    cidx, comment = tlist.token_next_by(t=T.Comment)
    while comment:
        stop_idx, stop = tlist.token_not_matching(
            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace, idx=cidx)
        if stop is not None:
            # Back up over trailing whitespace so it is not swallowed.
            last_idx, _ = tlist.token_prev(stop_idx, skip_ws=False)
            tlist.group_tokens(sql.Comment, cidx, last_idx)

        cidx, comment = tlist.token_next_by(t=T.Comment, idx=cidx)
Example #19
Source File: grouping.py    From codenn with MIT License 4 votes vote down vote up
def group_identifier_list(tlist):
    """Group comma-separated list items in *tlist* into sql.IdentifierList
    nodes, scanning around each comma for valid neighbouring items.
    """
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    idx = 0
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                # First comma of a new list: the list starts at `before`.
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                # Continue scanning after the freshly created group.
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_
Example #20
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def process(self, stack, stream):
        """Yield the column names/aliases selected by a SELECT statement.

        Runs a small state machine over the token stream:
        mode 0 = before SELECT, 1 = inside the column list,
        2 = expecting the alias after AS, 3 = columns done (past FROM).
        """
        mode = 0
        oldValue = ""       # column expression accumulated so far
        parenthesis = 0     # nesting depth, so commas inside calls are ignored

        for token_type, value in stream:
            # Ignore comments
            if token_type in Comment:
                continue

            # We have not detected a SELECT statement
            if mode == 0:
                if token_type in Keyword and value == 'SELECT':
                    mode = 1

            # We have detected a SELECT statement
            elif mode == 1:
                if value == 'FROM':
                    if oldValue:
                        yield oldValue

                    mode = 3    # Columns have been checked

                elif value == 'AS':
                    # Discard the expression; the alias is the real name.
                    oldValue = ""
                    mode = 2

                elif (token_type == Punctuation
                      and value == ',' and not parenthesis):
                    # Top-level comma ends the current column expression.
                    if oldValue:
                        yield oldValue
                    oldValue = ""

                elif token_type not in Whitespace:
                    if value == '(':
                        parenthesis += 1
                    elif value == ')':
                        parenthesis -= 1

                    oldValue += value

            # We are processing an AS keyword
            elif mode == 2:
                # We check also for Keywords because a bug in SQLParse
                if token_type == Name or token_type == Keyword:
                    yield value
                    mode = 1


# ---------------------------
# postprocess 
Example #21
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def group_identifier_list(tlist):
    """Group comma-separated list items in *tlist* into sql.IdentifierList
    nodes, scanning around each comma for valid neighbouring items.
    """
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        # Go back one idx to make sure to find the correct tcomma
        idx = tlist.token_index(tcomma)
        before = tlist.token_prev(idx)
        after = tlist.token_next(idx)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(idx + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                # First comma of a new list: the list starts at `before`.
                start = before
            after_idx = tlist.token_index(after, start=idx)
            next_ = tlist.token_next(after_idx)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                # Continue scanning after the freshly created group.
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_
Example #22
Source File: filters.py    From codenn with MIT License 4 votes vote down vote up
def process(self, stack, stream):
        """Yield the column names/aliases selected by a SELECT statement.

        Runs a small state machine over the token stream:
        mode 0 = before SELECT, 1 = inside the column list,
        2 = expecting the alias after AS, 3 = columns done (past FROM).
        """
        mode = 0
        oldValue = ""       # column expression accumulated so far
        parenthesis = 0     # nesting depth, so commas inside calls are ignored

        for token_type, value in stream:
            # Ignore comments
            if token_type in Comment:
                continue

            # We have not detected a SELECT statement
            if mode == 0:
                if token_type in Keyword and value == 'SELECT':
                    mode = 1

            # We have detected a SELECT statement
            elif mode == 1:
                if value == 'FROM':
                    if oldValue:
                        yield oldValue

                    mode = 3    # Columns have been checked

                elif value == 'AS':
                    # Discard the expression; the alias is the real name.
                    oldValue = ""
                    mode = 2

                elif (token_type == Punctuation
                      and value == ',' and not parenthesis):
                    # Top-level comma ends the current column expression.
                    if oldValue:
                        yield oldValue
                    oldValue = ""

                elif token_type not in Whitespace:
                    if value == '(':
                        parenthesis += 1
                    elif value == ')':
                        parenthesis -= 1

                    oldValue += value

            # We are processing an AS keyword
            elif mode == 2:
                # We check also for Keywords because a bug in SQLParse
                if token_type == Name or token_type == Keyword:
                    yield value
                    mode = 1


# ---------------------------
# postprocess