Python sqlparse.sql.Token() Examples

The following are 30 code examples of sqlparse.sql.Token(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module sqlparse.sql, or try the search function.
Example #1
Source File: aligned_indent.py    From SublimeText-SQLTools with GNU General Public License v3.0 8 votes vote down vote up
def _process_case(self, tlist):
        """Vertically align the branches of a CASE expression.

        Pads each WHEN condition with fill characters so every THEN value
        starts at the same column, and aligns ELSE and the closing END
        keyword with the conditions.
        """
        offset_ = len('case ') + len('when ')
        cases = tlist.get_cases(skip_ws=True)
        # align the end as well
        end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
        cases.append((None, [end_token]))

        # Rendered text width of each condition (0 for ELSE/END entries,
        # which have no condition part).
        condition_width = [len(' '.join(map(text_type, cond))) if cond else 0
                           for cond, _ in cases]
        max_cond_width = max(condition_width)

        for i, (cond, value) in enumerate(cases):
            # cond is None when 'else or end'
            stmt = cond[0] if cond else value[0]

            if i > 0:
                # Every entry after the first starts on its own line, shifted
                # left by its own width so keywords line up under 'case when'.
                tlist.insert_before(stmt, self.nl(
                    offset_ - len(text_type(stmt))))
            if cond:
                # Pad this condition up to the widest one so values align.
                ws = sql.Token(T.Whitespace, self.char * (
                    max_cond_width - condition_width[i]))
                tlist.insert_after(cond[-1], ws)
Example #2
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 6 votes vote down vote up
def statement2col_defs(cls, token: Token):
        """Yield column definitions parsed from a CREATE TABLE column-list token.

        ``token.value`` is expected to be a parenthesised, comma-separated
        list of column definitions. Yields ``SQLColumnConstraint`` for
        CONSTRAINT entries and ``SQLColumnDef`` for ordinary columns.

        Raises:
            SQLDecodeError: if a column identifier is not double-quoted.
            NotSupportedError: if the column's data type is not in the
                backend's supported data types.

        NOTE(review): the naive split on ',' would break on data types whose
        parameters contain commas (e.g. numeric(10, 2)) — confirm upstream
        input never produces those.
        """
        from djongo.base import DatabaseWrapper
        supported_data_types = set(DatabaseWrapper.data_types.values())

        defs = token.value.strip('()').split(',')
        for col in defs:
            col = col.strip()
            name, other = col.split(' ', 1)
            if name == 'CONSTRAINT':
                yield SQLColumnConstraint()
            else:
                # Ordinary column: identifier must be double-quoted.
                if col[0] != '"':
                    raise SQLDecodeError('Column identifier not quoted')
                name, other = col[1:].split('"', 1)
                other = other.strip()

                data_type, constraint_sql = other.split(' ', 1)
                if data_type not in supported_data_types:
                    raise NotSupportedError(f'Data of type: {data_type}')

                col_constraints = set(SQLColumnDef._get_constraints(constraint_sql))
                yield SQLColumnDef(name=name,
                                   data_type=data_type,
                                   col_constraints=col_constraints)
Example #3
Source File: sql_handler.py    From mysql_streamer with Apache License 2.0 6 votes vote down vote up
def pop(self):
        """Consume and return the next token, normalising table references.

        Advances the cursor past the peeked token. For Identifier tokens the
        result is reduced to just the (optionally database-qualified) table
        name, discarding any trailing text sqlparse wrongly attached.
        """
        next_val = self.peek()
        self.index += 1
        # We need to handle three cases here where the next_val could be:
        # 1. <table_name> ('business')
        # 2. <database_name>.<table_name> ('yelp.business')
        # 3. <database_name>.<table_name> <extended_query>
        # ('yelp.business change col_one col_two')
        # In all the cases we should return a token consisting of only the table
        # name or if the database name is present then the database name and the
        # table name. Case #3 occurs because SQLParse incorrectly parses certain
        # queries.
        if isinstance(next_val, Identifier):
            tokens = next_val.tokens
            if len(tokens) > 1 and tokens[1].value == '.':
                # Qualified name: rebuild "<db>.<table>" from the first three
                # sub-tokens, dropping anything after the table name.
                str_token = "{db_name}{punctuation}{table_name}".format(
                    db_name=tokens[0].value,
                    punctuation=tokens[1].value,
                    table_name=tokens[2].value
                )
                return TK(Token.Name, str_token)
            else:
                # Unqualified: the first sub-token is the table name.
                return next_val.token_first()
        return next_val
Example #4
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __custom_process_insert_values_lr(self, tlist):
        # For INSERT statements, normalise the whitespace around the VALUES
        # keyword to exactly one space on each side.
        values_token = tlist.token_next_match(0, T.Keyword, "VALUES")
        if values_token:
            # Collapse every whitespace token before VALUES: keep the nearest
            # one as a single space, blank out the rest.
            prv = tlist.token_prev(values_token, skip_ws=False)
            if prv and prv.is_whitespace():
                prv.value = " "
                prv = tlist.token_prev(prv, skip_ws=False)
                while prv and prv.is_whitespace():
                    prv.value = ""
                    prv = tlist.token_prev(prv, skip_ws=False)
            else:
                # No whitespace at all before VALUES — insert one space.
                tlist.insert_before(values_token, sql.Token(T.Whitespace, " "))

            # Same normalisation for the whitespace after VALUES.
            nxt = tlist.token_next(values_token, skip_ws=False)
            if nxt and nxt.is_whitespace():
                nxt.value = " "
                nxt = tlist.token_next(nxt, skip_ws=False)
                while nxt and nxt.is_whitespace():
                    nxt.value = ""
                    nxt = tlist.token_next(nxt, skip_ws=False)
            else:
                tlist.insert_after(values_token, sql.Token(T.Whitespace, " "))
Example #5
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def _get_alias(self, token):
        """Return the alias token within ``token``, or None if it has none.

        Recognises explicit ``AS`` aliases, implicit whitespace-separated
        aliases, and a name glued directly to a closing parenthesis (in
        which case a separating tab is inserted first).
        """
        tkw = token.token_next_match(0, T.Keyword, 'AS')
        if tkw is not None:
            return tu.token_next_enable(token, tkw)

        left = tu.token_next_enable(token)
        if not left:
            return None

        def is_space(tkn):
            # Only real (non-empty) whitespace counts; earlier passes may
            # have blanked whitespace tokens to ''.
            return tkn.is_whitespace() and tkn.value

        spl = token.token_matching(token.token_index(left), [is_space])
        if spl:
            return tu.token_next_enable(token, spl)

        if tu.is_parenthesis(left):
            tkn = tu.token_next_enable(token, left)
            if tkn and (tu.is_identifier(tkn) or (tkn.ttype in T.Name)):
                # The "(...)ALIAS" case
                space = sql.Token(T.Whitespace, "\t")  # add a separating space
                token.insert_after(left, space)
                return tkn

        return None
Example #6
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def _process(self, tlist):
        """Strip comment tokens from ``tlist``.

        A comment wedged between two non-whitespace tokens (and not adjacent
        to a parenthesis) is replaced by a single space so its neighbours do
        not fuse together; any other comment is simply removed.
        """
        token = self._get_next_comment(tlist)
        while token:
            tidx = tlist.token_index(token)
            before = tlist.token_prev(tidx, False)
            after = tlist.token_next(tidx, False)
            removable = (before is None or after is None
                         or before.is_whitespace() or after.is_whitespace()
                         or before.match(T.Punctuation, '(')
                         or after.match(T.Punctuation, ')'))
            if removable:
                tlist.tokens.pop(tidx)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
            token = self._get_next_comment(tlist)
Example #7
Source File: right_margin.py    From SublimeText-SQLTools with GNU General Public License v3.0 6 votes vote down vote up
def _process(self, group, stream):
        """Re-emit ``stream``, inserting line breaks to honour the right margin.

        Tracks the text of the current output line in ``self.line``; when the
        next token would push it past ``self.width``, a newline token carrying
        the current indentation is yielded first. Groups not listed in
        ``keep_together`` are processed recursively.
        """
        for tok in stream:
            if tok.is_whitespace and '\n' in tok.value:
                # A newline resets the tracked line to whatever follows it.
                tail = tok.value.splitlines()[-1]
                self.line = '' if tok.value.endswith('\n') else tail
            elif tok.is_group and type(tok) not in self.keep_together:
                tok.tokens = self._process(tok, tok.tokens)
            else:
                text = text_type(tok)
                if len(self.line) + len(text) > self.width:
                    # Preserve the current leading indentation on the new line.
                    m = re.search(r'^ +', self.line)
                    indent = m.group() if m is not None else ''
                    yield sql.Token(T.Whitespace, '\n' + indent)
                    self.line = indent
                self.line += text
            yield tok
Example #8
Source File: others.py    From SublimeText-SQLTools with GNU General Public License v3.0 6 votes vote down vote up
def _process(tlist):
        def next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        tidx, token = next_comment()
        while token:
            pidx, before = tlist.token_prev(tidx, skip_ws=False)
            nidx, after = tlist.token_next(tidx, skip_ws=False)
            # A comment squeezed between two non-whitespace tokens (and not
            # adjacent to a parenthesis) becomes a single space so the
            # neighbours don't fuse; every other comment is dropped outright.
            removable = (before is None or after is None
                         or before.is_whitespace or after.is_whitespace
                         or before.match(T.Punctuation, '(')
                         or after.match(T.Punctuation, ')'))
            if removable:
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')

            tidx, token = next_comment()
Example #9
Source File: filters.py    From codenn with MIT License 6 votes vote down vote up
def _process(self, tlist):
        """Remove comment tokens from ``tlist``, preserving token spacing."""
        token = self._get_next_comment(tlist)
        while token:
            tidx = tlist.token_index(token)
            prev = tlist.token_prev(tidx, False)
            next_ = tlist.token_next(tidx, False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev is not None and next_ is not None
                and not prev.is_whitespace() and not next_.is_whitespace()
                and not (prev.match(T.Punctuation, '(')
                         or next_.match(T.Punctuation, ')'))):
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
            else:
                tlist.tokens.pop(tidx)
            token = self._get_next_comment(tlist)
Example #10
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def _process(self, stack, group, stream):
        """Re-emit ``stream``, inserting line breaks to honour the right margin.

        ``self.line`` tracks the text of the current output line; when the
        next token would push it past ``self.width``, a newline token with the
        current indentation is yielded first. Groups not listed in
        ``keep_together`` are processed recursively.
        """
        for tok in stream:
            if tok.is_whitespace() and '\n' in tok.value:
                # A newline resets the tracked line to whatever follows it.
                tail = tok.value.splitlines()[-1]
                self.line = '' if tok.value.endswith('\n') else tail
            elif tok.is_group() and tok.__class__ not in self.keep_together:
                tok.tokens = self._process(stack, tok, tok.tokens)
            else:
                text = str(tok)
                if len(self.line) + len(text) > self.width:
                    # Carry the current leading indentation onto the new line.
                    m = re.search('^ +', self.line)
                    indent = m.group() if m is not None else ''
                    yield sql.Token(T.Whitespace, '\n%s' % indent)
                    self.line = indent
                self.line += text
            yield tok
Example #11
Source File: filters.py    From codenn with MIT License 6 votes vote down vote up
def process(self, stack, stmt):
        """Insert a blank-line separator before every statement after the first.

        Uses a single newline when the previous statement already ended with
        one, otherwise two. NOTE: relies on the Python 2 ``unicode`` builtin;
        this module targets Python 2.
        """
        if isinstance(stmt, sql.Statement):
            self._curr_stmt = stmt
        self._process(stmt)
        if isinstance(stmt, sql.Statement):
            if self._last_stmt is not None:
                if unicode(self._last_stmt).endswith('\n'):
                    nl = '\n'
                else:
                    nl = '\n\n'
                stmt.tokens.insert(
                    0, sql.Token(T.Whitespace, nl))
            if self._last_stmt != stmt:
                self._last_stmt = stmt


# FIXME: Doesn't work ;) 
Example #12
Source File: filters.py    From codenn with MIT License 6 votes vote down vote up
def _process(self, stack, group, stream):
        """Re-emit ``stream``, breaking lines that exceed ``self.width``.

        ``self.line`` tracks the current output line; a newline token with
        matching indentation is yielded before any token that would overflow
        it. NOTE: relies on the Python 2 ``unicode`` builtin; this module
        targets Python 2.
        """
        for token in stream:
            if token.is_whitespace() and '\n' in token.value:
                # A newline resets the tracked line to whatever follows it.
                if token.value.endswith('\n'):
                    self.line = ''
                else:
                    self.line = token.value.splitlines()[-1]
            elif (token.is_group()
                  and not token.__class__ in self.keep_together):
                # Recurse into groups unless they must be kept on one line.
                token.tokens = self._process(stack, token, token.tokens)
            else:
                val = unicode(token)
                if len(self.line) + len(val) > self.width:
                    # Carry the current leading indentation onto the new line.
                    match = re.search('^ +', self.line)
                    if match is not None:
                        indent = match.group()
                    else:
                        indent = ''
                    yield sql.Token(T.Whitespace, '\n%s' % indent)
                    self.line = indent
                self.line += val
            yield token
Example #13
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def process(self, stack, stmt):
        """Insert a blank-line separator before every statement after the first.

        Emits a single newline when the previous statement already ended with
        one, otherwise an empty line (two newlines).
        """
        is_statement = isinstance(stmt, sql.Statement)
        if is_statement:
            self._curr_stmt = stmt
        self._process(stmt)
        if is_statement:
            if self._last_stmt is not None:
                sep = '\n' if str(self._last_stmt).endswith('\n') else '\n\n'
                stmt.tokens.insert(0, sql.Token(T.Whitespace, sep))
            if self._last_stmt != stmt:
                self._last_stmt = stmt


# FIXME: Doesn't work ;) 
Example #14
Source File: search_utils.py    From mlflow with Apache License 2.0 5 votes vote down vote up
def _validate_comparison(cls, tokens):
        """Validate that ``tokens`` form ``<identifier> <comparison-op> <value>``.

        Raises:
            MlflowException (INVALID_PARAMETER_VALUE): if the clause does not
            have exactly three tokens, or any token has the wrong kind.
        """
        base_error_string = "Invalid comparison clause"
        if len(tokens) != 3:
            raise MlflowException("{}. Expected 3 tokens found {}".format(base_error_string,
                                                                          len(tokens)),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[0], Identifier):
            raise MlflowException("{}. Expected 'Identifier' found '{}'".format(base_error_string,
                                                                                str(tokens[0])),
                                  error_code=INVALID_PARAMETER_VALUE)
        # BUG FIX: the original used 'and' in the next two conditions, so any
        # Token instance passed validation regardless of its ttype — the ttype
        # check was unreachable. 'or' enforces both requirements.
        if not isinstance(tokens[1], Token) or tokens[1].ttype != TokenType.Operator.Comparison:
            raise MlflowException("{}. Expected comparison found '{}'".format(base_error_string,
                                                                              str(tokens[1])),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[2], Token) or \
                (tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES) or
                 isinstance(tokens[2], Identifier)):
            raise MlflowException("{}. Expected value token found '{}'".format(base_error_string,
                                                                               str(tokens[2])),
                                  error_code=INVALID_PARAMETER_VALUE)
Example #15
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def nl_with_indent(self, offset):
        """Return a newline token followed by tab indentation.

        The tab count is the current indent depth plus ``offset``, clamped
        to zero so a negative offset can never produce a broken token.
        """
        depth = (self.indent * self.width) + self.offset + offset
        if depth < 0:
            depth = 0
        return sql.Token(T.Whitespace, '\n' + '\t' * depth)
Example #16
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _adjust_comparisons_indent(self, comparisons):
        """
            Adjust the internal indentation of Comparison tokens so their
            left sides, operators and right sides line up in columns.
        """

        # Collect only the Comparison group tokens, wrapped for measuring.
        ids = []
        for token in comparisons:
            if not token.is_group():
                continue
            if tu.is_comparison(token):
                ids.append(_ComparisonObject(token, self.indent + self.offset, self.local_config))

        # Column widths are the maxima over all comparisons in the group.
        max_width_left = 0
        max_width_operator = 0
        max_width_right = 0
        for comparison in ids:
            max_width_left = max(max_width_left, comparison.width_left)
            max_width_operator = max(max_width_operator, comparison.width_operator)
            max_width_right = max(max_width_right, comparison.width_right)

        for comparison in ids:
            if comparison.right_tokens:
                # Pad after the left-hand side up to the widest left column.
                left = comparison.left_lines[-1]
                left_space = "\t" * int(calc_tab_padding_count(left, max_width_left))
                if len(comparison.left_lines) > 1:
                    left_space += "\t"
                comparison.token.insert_after(comparison.left_tokens[-1], sql.Token(T.Whitespace, left_space))

                # Pad after the operator up to the widest operator column.
                op_space = "\t" * int(calc_tab_padding_count(comparison.operator_string, max_width_operator))
                comparison.token.insert_after(comparison.operator_tokens[-1], sql.Token(T.Whitespace, op_space))

                # Only pad the right-hand side when a trailing line comment
                # needs to be aligned after it.
                if comparison.line_comment:
                    right = comparison.right_lines[-1]
                    right_space = "\t" * int(calc_tab_padding_count(right, max_width_right))
                    if len(comparison.right_lines) > 1:
                        right_space += "\t\t"  + ("\t" * int(calc_tab_padding_count("", max_width_left)))
                    comparison.token.insert_after(comparison.right_tokens[-1], sql.Token(T.Whitespace, right_space))
Example #17
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def one_indent_space(self):
        """Return a whitespace token holding a single indentation tab."""
        return sql.Token(T.Whitespace, "\t")
Example #18
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def indent_space(self, offset=0):
        """Return a whitespace token of tabs for the current indent depth
        plus ``offset``."""
        depth = (self.indent * self.width) + self.offset + offset
        return sql.Token(T.Whitespace, "\t" * depth)
Example #19
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def cr(self):
        """Return a whitespace token holding a single newline."""
        return sql.Token(T.Whitespace, '\n')
Example #20
Source File: filters.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _process(self, stream, varname, has_nl):
        """Re-emit ``stream`` wrapped as a Python string assignment.

        Yields tokens forming ``varname = '<sql>'`` — parenthesised and split
        into one quoted string per line when ``has_nl`` is true. Single
        quotes inside the SQL are backslash-escaped.
        """
        # SQL query assignment to varname
        if self.count > 1:
            yield sql.Token(T.Whitespace, '\n')
        yield sql.Token(T.Name, varname)
        yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Operator, '=')
        yield sql.Token(T.Whitespace, ' ')
        if has_nl:
            yield sql.Token(T.Operator, '(')
        yield sql.Token(T.Text, "'")

        # Print the tokens on the quote
        for token in stream:
            # Token is a new line separator
            if token.is_whitespace() and '\n' in token.value:
                # Close quote and add a new line
                yield sql.Token(T.Text, " '")
                yield sql.Token(T.Whitespace, '\n')

                # Quote header on secondary lines
                yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
                yield sql.Token(T.Text, "'")

                # Indentation
                after_lb = token.value.split('\n', 1)[1]
                if after_lb:
                    yield sql.Token(T.Whitespace, after_lb)
                continue

            # Token has escape chars
            elif "'" in token.value:
                token.value = token.value.replace("'", "\\'")

            # Put the token
            yield sql.Token(T.Text, token.value)

        # Close quote
        yield sql.Token(T.Text, "'")
        if has_nl:
            yield sql.Token(T.Operator, ')')
Example #21
Source File: sql_parse.py    From incubator-superset with Apache License 2.0 5 votes vote down vote up
def _is_identifier(token: Token) -> bool:
        """Return True if ``token`` is an identifier or an identifier list."""
        return isinstance(token, (IdentifierList, Identifier))
Example #22
Source File: functions.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def token2sql(token: Token,
                  query: 'query_module.BaseQuery'
                  ) -> U['CountFunc',
                         'SimpleFunc']:
        """Build the SQL-function wrapper matching the token's function name.

        COUNT gets its dedicated handler; every other function is wrapped
        as a SimpleFunc.
        """
        name = token[0].get_name()
        if name == 'COUNT':
            return CountFunc.token2sql(token, query)
        return SimpleFunc(token, query)
Example #23
Source File: functions.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def token2sql(token: Token,
                  query: 'query_module.BaseQuery'
                  ) -> U['CountFuncAll',
                         'CountFuncSingle']:
        """Choose COUNT(*) vs COUNT(column) handling for a COUNT token.

        EAFP: indexing the first parameter raises IndexError when the
        function has none, which identifies the COUNT(*) form.
        """
        try:
            token[0].get_parameters()[0]
        except IndexError:
            return CountFuncAll(token, query)
        else:
            return CountFuncSingle(token, query)
Example #24
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def __init__(self,
                 token: Token,
                 query: 'query_module.BaseQuery'):
        """Store the wrapped sqlparse token and the owning query object."""
        # Underlying sqlparse token this wrapper translates.
        self._token = token
        # Owning query, available to subclasses for context.
        self.query = query
Example #25
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def tokens2sql(token: Token,
                   query: 'query_module.BaseQuery'
                   ) -> Iterator[all_token_types]:
        """Yield SQL wrapper objects for ``token``, dispatching on its type.

        Identifier lists are flattened recursively; unknown token types
        raise SQLDecodeError.
        """
        from .functions import SQLFunc
        if isinstance(token, Identifier):
            # Bug fix for sql parse
            if isinstance(token[0], Parenthesis):
                try:
                    int(token[0][1].value)
                except ValueError:
                    # Non-integer parenthesised value: re-raise unchanged.
                    raise
                else:
                    yield SQLConstIdentifier(token, query)
            elif isinstance(token[0], Function):
                yield SQLFunc.token2sql(token, query)
            else:
                yield SQLIdentifier(token, query)
        elif isinstance(token, Function):
            yield SQLFunc.token2sql(token, query)
        elif isinstance(token, Comparison):
            yield SQLComparison(token, query)
        elif isinstance(token, IdentifierList):
            # Flatten the list by dispatching each member recursively.
            for tok in token.get_identifiers():
                yield from SQLToken.tokens2sql(tok, query)
        elif isinstance(token, Parenthesis):
            yield SQLPlaceholder(token, query)
        else:
            raise SQLDecodeError(f'Unsupported: {token.value}')
Example #26
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def __init__(self, token: Token, query: 'query_module.BaseQuery'):
        """Delegate initialisation to the base token wrapper."""
        super().__init__(token, query)
Example #27
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def get_value(self, tok: Token):
        """Resolve a VALUES-entry token to its Python value.

        Placeholders resolve via ``placeholder_index``, NULL maps to None,
        and DEFAULT maps to the string 'DEFAULT'. Anything else is a
        decode error.
        """
        if tok.ttype == tokens.Name.Placeholder:
            return self.placeholder_index(tok)
        elif tok.match(tokens.Keyword, 'NULL'):
            return None
        elif tok.match(tokens.Keyword, 'DEFAULT'):
            return 'DEFAULT'
        else:
            raise SQLDecodeError
Example #28
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def current_token(self) -> Token:
        """Return the token at the cursor's current position."""
        return self._statement[self._tok_id]
Example #29
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def __init__(self, statement: U[Statement, Token]):
        """Initialise a token cursor over ``statement``."""
        # Parsed statement (or single token) being walked.
        self._statement = statement
        # Index of the current token within the statement.
        self._tok_id = 0
        # Shared generator instance that drives iteration (see __iter__).
        self._gen_inst = self._generator()
Example #30
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0 5 votes vote down vote up
def __iter__(self) -> Iterator[Token]:
        """Delegate iteration to the shared generator instance."""
        yield from self._gen_inst