Python sqlparse.sql.Comparison() Examples

The following are 19 code examples of sqlparse.sql.Comparison(), taken from open-source projects. The project and source file for each example are noted above its code. You may also want to check out the other functions and classes available in the sqlparse.sql module.
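
Before the examples, a minimal orientation sketch (not taken from any of the projects below) showing where a sqlparse.sql.Comparison group appears when parsing a simple statement, and what its left/right attributes hold; the output comments assume recent sqlparse behaviour:

import sqlparse
from sqlparse import sql

# A WHERE clause with a single comparison; sqlparse groups "age >= 21"
# into a sql.Comparison with .left and .right sub-tokens.
stmt = sqlparse.parse("SELECT * FROM users WHERE age >= 21")[0]
where = stmt.tokens[-1]  # the sql.Where group
comparison = next(t for t in where.tokens if isinstance(t, sql.Comparison))

print(type(comparison).__name__)   # Comparison
print(comparison.left.value)       # age
print(comparison.right.value)      # 21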
Example #1
Source File: convert.py    From sqlitis with MIT License
def build_comparison(token):
    assert type(token) is S.Comparison

    m = M.Comparison()
    for tok in remove_whitespace(token.tokens):
        LOG.debug("  %s %s", tok, type(tok))
        if type(tok) is S.Parenthesis:
            subtokens = remove_whitespace(tok.tokens)
            subquery = tokens_to_sqla(subtokens[1:-1])
            if not m.left:
                m.left = subquery
            else:
                m.right = subquery
        else:
            m = sql_literal_to_model(tok, m)
            if not m:
                raise Exception("[BUG] Failed to convert %s to model" % tok)

    return m 
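
The Parenthesis branch above handles comparisons whose operand is a subquery. A minimal sketch with plain sqlparse (no sqlitis models) of the token shape that branch receives; the expected output is an assumption based on sqlparse's grouping:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT * FROM t WHERE x = (SELECT MAX(id) FROM t)")[0]
comparison = next(t for t in stmt.tokens[-1].tokens if isinstance(t, sql.Comparison))

for tok in comparison.tokens:
    if not tok.is_whitespace:
        print(type(tok).__name__, repr(tok.value))
# Expected (roughly): Identifier 'x', Token '=', Parenthesis '(SELECT MAX(id) FROM t)'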
Example #2
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0
def group_identifier_list(tlist):
    m_role = T.Keyword, ('null', 'role')
    sqlcls = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
              sql.IdentifierList, sql.Operation)
    ttypes = (T_NUMERICAL + T_STRING + T_NAME +
              (T.Keyword, T.Comment, T.Wildcard))

    def match(token):
        return token.match(T.Punctuation, ',')

    def valid(token):
        return imt(token, i=sqlcls, m=m_role, t=ttypes)

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.IdentifierList, match,
           valid_prev, valid_next, post, extend=True) 
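
The function above appears to be SQLTools' vendored copy of sqlparse's own identifier-list grouping; its effect can be seen with stock sqlparse:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT a, b, c FROM t")[0]
idlist = next(t for t in stmt.tokens if isinstance(t, sql.IdentifierList))
print([str(ident) for ident in idlist.get_identifiers()])  # expected: ['a', 'b', 'c']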
Example #3
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0
def group_comparison(tlist):
    sqlcls = (sql.Parenthesis, sql.Function, sql.Identifier,
              sql.Operation)
    ttypes = T_NUMERICAL + T_STRING + T_NAME

    def match(token):
        return token.ttype == T.Operator.Comparison

    def valid(token):
        if imt(token, t=ttypes, i=sqlcls):
            return True
        elif token and token.is_keyword and token.normalized == 'NULL':
            return True
        else:
            return False

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    valid_prev = valid_next = valid
    _group(tlist, sql.Comparison, match,
           valid_prev, valid_next, post, extend=False) 
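
group_comparison is normally invoked for you as part of sqlparse's grouping pass; a short sketch of its effect, mirroring the tests in Examples #12 through #15 below:

import sqlparse
from sqlparse import sql

# An ordinary comparison is grouped into a single sql.Comparison token...
assert isinstance(sqlparse.parse("a = 1")[0].tokens[0], sql.Comparison)

# ...but a bare operator with no valid operands is not (compare Example #15).
p = sqlparse.parse("(=)")[0]
assert not any(isinstance(t, sql.Comparison) for t in p.tokens[0].tokens)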
Example #4
Source File: convert.py    From sqlitis with MIT License
def sql_literal_to_model(tok, m=M):
    """
    :param m: the source model to "append" the literal to.
        defaults to M - the sqlitis models module (which means a fresh model
        is created)
    :return: the resulting model
    """

    def is_string_literal(tok):
        text = tok.normalized
        return all([text.startswith('"'), text.endswith('"')])

    # sqlparse treats string literals as identifiers
    if type(tok) is S.Identifier and is_string_literal(tok):
        return m.Field(tok.normalized, literal=True)
    elif type(tok) is S.Identifier:
        return m.Field(tok.normalized)
    elif tok.ttype is T.Comparison:
        return m.Op(tok.normalized)
    elif tok.ttype in [T.Literal, T.String, T.Number, T.Number.Integer, T.Number.Float]:
        return m.Field(tok.normalized, literal=True)

    return None 
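
The comment above ("sqlparse treats string literals as identifiers") can be observed directly with stock sqlparse; a minimal sketch, with the expected result stated as an assumption:

import sqlparse
from sqlparse import sql

comparison = sqlparse.parse('name = "bob"')[0].tokens[0]
assert isinstance(comparison, sql.Comparison)

right = comparison.right
print(type(right).__name__, right.value)  # expected: Identifier "bob"
assert isinstance(right, sql.Identifier)  # the double-quoted value comes back wrapped in an Identifier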
Example #5
Source File: sql_tokens.py    From djongo with GNU Affero General Public License v3.0
def tokens2sql(token: Token,
                   query: 'query_module.BaseQuery'
                   ) -> Iterator[all_token_types]:
        from .functions import SQLFunc
        if isinstance(token, Identifier):
            # Bug fix for sql parse
            if isinstance(token[0], Parenthesis):
                try:
                    int(token[0][1].value)
                except ValueError:
                    raise
                else:
                    yield SQLConstIdentifier(token, query)
            elif isinstance(token[0], Function):
                yield SQLFunc.token2sql(token, query)
            else:
                yield SQLIdentifier(token, query)
        elif isinstance(token, Function):
            yield SQLFunc.token2sql(token, query)
        elif isinstance(token, Comparison):
            yield SQLComparison(token, query)
        elif isinstance(token, IdentifierList):
            for tok in token.get_identifiers():
                yield from SQLToken.tokens2sql(tok, query)
        elif isinstance(token, Parenthesis):
            yield SQLPlaceholder(token, query)
        else:
            raise SQLDecodeError(f'Unsupported: {token.value}') 
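
A hedged look, using plain sqlparse outside djongo, at the two Identifier shapes the first branches above distinguish; the exact grouping is an assumption about recent sqlparse versions:

import sqlparse
from sqlparse import sql

# An aliased constant: an Identifier whose first sub-token is a Parenthesis.
ident = next(t for t in sqlparse.parse("SELECT (1) AS const")[0].tokens
             if isinstance(t, sql.Identifier))
print(type(ident.tokens[0]).__name__)  # expected: Parenthesis

# An aliased function call: an Identifier whose first sub-token is a Function.
ident = next(t for t in sqlparse.parse("SELECT COUNT(*) AS n")[0].tokens
             if isinstance(t, sql.Identifier))
print(type(ident.tokens[0]).__name__)  # expected: Function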
Example #6
Source File: search_utils.py    From mlflow with Apache License 2.0
def _parse_filter_for_model_registry(cls, filter_string, valid_search_keys):
        if not filter_string or filter_string == "":
            return []
        expected = "Expected search filter with single comparison operator. e.g. name='myModelName'"
        try:
            parsed = sqlparse.parse(filter_string)
        except Exception:
            raise MlflowException("Error while parsing filter '%s'. %s" % (filter_string, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        if len(parsed) == 0 or not isinstance(parsed[0], Statement):
            raise MlflowException("Invalid filter '%s'. Could not be parsed. %s" %
                                  (filter_string, expected), error_code=INVALID_PARAMETER_VALUE)
        elif len(parsed) > 1:
            raise MlflowException("Search filter '%s' contains multiple expressions. "
                                  "%s " % (filter_string, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        statement = parsed[0]
        invalids = list(filter(cls._invalid_statement_token, statement.tokens))
        if len(invalids) > 0:
            invalid_clauses = ", ".join("'%s'" % token for token in invalids)
            raise MlflowException("Invalid clause(s) in filter string: %s. "
                                  "%s" % (invalid_clauses, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        return [cls._get_comparison_for_model_registry(
            si,
            valid_search_keys)
            for si in statement.tokens if isinstance(si, Comparison)] 
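
A sketch of the kind of filter string this method is written for, and of what sqlparse hands back for it:

import sqlparse
from sqlparse.sql import Comparison, Statement

parsed = sqlparse.parse("name='myModelName'")
assert len(parsed) == 1 and isinstance(parsed[0], Statement)

comparisons = [t for t in parsed[0].tokens if isinstance(t, Comparison)]
print(comparisons)  # expected: a single Comparison token, name='myModelName'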
Example #7
Source File: search_utils.py    From mlflow with Apache License 2.0
def _process_statement(cls, statement):
        # check validity
        invalids = list(filter(cls._invalid_statement_token, statement.tokens))
        if len(invalids) > 0:
            invalid_clauses = ", ".join("'%s'" % token for token in invalids)
            raise MlflowException("Invalid clause(s) in filter string: %s" % invalid_clauses,
                                  error_code=INVALID_PARAMETER_VALUE)
        return [cls._get_comparison(si) for si in statement.tokens if isinstance(si, Comparison)] 
Example #8
Source File: search_utils.py    From mlflow with Apache License 2.0
def _invalid_statement_token(cls, token):
        if isinstance(token, Comparison):
            return False
        elif token.is_whitespace:
            return False
        elif token.match(ttype=TokenType.Keyword, values=["AND"]):
            return False
        else:
            return True 
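
For an AND-joined filter, the statement's top-level tokens are exactly the three kinds this predicate lets through: Comparison groups, whitespace, and the AND keyword. A small sketch (the filter string is an illustrative assumption, not taken from mlflow's docs):

import sqlparse
from sqlparse.sql import Comparison
from sqlparse import tokens as TokenType

stmt = sqlparse.parse("metrics.rmse < 0.9 AND params.lr = '0.01'")[0]
for tok in stmt.tokens:
    print(isinstance(tok, Comparison),
          tok.is_whitespace,
          tok.match(ttype=TokenType.Keyword, values=["AND"]),
          repr(str(tok)))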
Example #9
Source File: search_utils.py    From mlflow with Apache License 2.0
def _validate_comparison(cls, tokens):
        base_error_string = "Invalid comparison clause"
        if len(tokens) != 3:
            raise MlflowException("{}. Expected 3 tokens found {}".format(base_error_string,
                                                                          len(tokens)),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[0], Identifier):
            raise MlflowException("{}. Expected 'Identifier' found '{}'".format(base_error_string,
                                                                                str(tokens[0])),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[1], Token) and tokens[1].ttype != TokenType.Operator.Comparison:
            raise MlflowException("{}. Expected comparison found '{}'".format(base_error_string,
                                                                              str(tokens[1])),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[2], Token) and \
                (tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES) or
                 isinstance(tokens[2], Identifier)):
            raise MlflowException("{}. Expected value token found '{}'".format(base_error_string,
                                                                               str(tokens[2])),
                                  error_code=INVALID_PARAMETER_VALUE) 
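
The three tokens this validation expects are what is left of a Comparison group once whitespace is dropped (mlflow presumably filters whitespace out before calling it); a minimal sketch:

import sqlparse
from sqlparse.sql import Comparison

comp = sqlparse.parse("name = 'CNN'")[0].tokens[0]
assert isinstance(comp, Comparison)

stripped = [t for t in comp.tokens if not t.is_whitespace]
print(len(stripped))                         # expected: 3
print([type(t).__name__ for t in stripped])  # expected: ['Identifier', 'Token', 'Token']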
Example #10
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_comparison(token):
    """
        比較演算判定
    """
    return isinstance(token, sql.Comparison) 
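
A hedged usage sketch of the helper above, redefined here against stock sqlparse (the original operates on uroboroSQL-formatter's bundled copy):

import sqlparse
from sqlparse import sql

def is_comparison(token):
    return isinstance(token, sql.Comparison)

stmt = sqlparse.parse("a = 1 AND b = 2")[0]
print([str(t) for t in stmt.tokens if is_comparison(t)])  # expected: ['a = 1', 'b = 2']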
Example #11
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_comparison(tlist):

    def _parts_valid(token):
        return (token.ttype in (T.String.Symbol, T.String.Single,
                                T.Name, T.Number, T.Number.Float,
                                T.Number.Integer, T.Literal,
                                T.Literal.Number.Integer, T.Name.Placeholder)
                or isinstance(token, (sql.Identifier, sql.Parenthesis))
                or (token.ttype is T.Keyword
                    and token.value.upper() in ['NULL', ]))
    _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
                      check_left=_parts_valid, check_right=_parts_valid) 
Example #12
Source File: test_grouping.py    From codenn with MIT License
def test_comparison_with_strings():  # issue148
    p = sqlparse.parse('foo = \'bar\'')[0]
    assert len(p.tokens) == 1
    assert isinstance(p.tokens[0], sql.Comparison)
    assert p.tokens[0].right.value == '\'bar\''
    assert p.tokens[0].right.ttype == T.String.Single 
Example #13
Source File: test_grouping.py    From codenn with MIT License
def test_comparison_with_floats():  # issue145
    p = sqlparse.parse('foo = 25.5')[0]
    assert len(p.tokens) == 1
    assert isinstance(p.tokens[0], sql.Comparison)
    assert len(p.tokens[0].tokens) == 5
    assert p.tokens[0].left.value == 'foo'
    assert p.tokens[0].right.value == '25.5' 
Example #14
Source File: test_grouping.py    From codenn with MIT License
def test_comparison_with_keywords():  # issue90
    # in fact these are assignments, but for now we don't distinguish them
    p = sqlparse.parse('foo = NULL')[0]
    assert len(p.tokens) == 1
    assert isinstance(p.tokens[0], sql.Comparison)
    assert len(p.tokens[0].tokens) == 5
    assert p.tokens[0].left.value == 'foo'
    assert p.tokens[0].right.value == 'NULL'
    # make sure it's case-insensitive
    p = sqlparse.parse('foo = null')[0]
    assert len(p.tokens) == 1
    assert isinstance(p.tokens[0], sql.Comparison) 
Example #15
Source File: test_grouping.py    From codenn with MIT License
def test_comparison_exclude(self):
        # make sure operators are not handled too lazy
        p = sqlparse.parse('(=)')[0]
        self.assert_(isinstance(p.tokens[0], sql.Parenthesis))
        self.assert_(not isinstance(p.tokens[0].tokens[1], sql.Comparison))
        p = sqlparse.parse('(a=1)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.Comparison))
        p = sqlparse.parse('(a>=1)')[0]
        self.assert_(isinstance(p.tokens[0].tokens[1], sql.Comparison)) 
Example #16
Source File: grouping.py    From codenn with MIT License
def group_comparison(tlist):

    def _parts_valid(token):
        return (token.ttype in (T.String.Symbol, T.String.Single,
                                T.Name, T.Number, T.Number.Float,
                                T.Number.Integer, T.Literal,
                                T.Literal.Number.Integer, T.Name.Placeholder)
                or isinstance(token, (sql.Identifier, sql.Parenthesis))
                or (token.ttype is T.Keyword
                    and token.value.upper() in ['NULL', ]))
    _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
                      check_left=_parts_valid, check_right=_parts_valid) 
Example #17
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        # Go back one idx to make sure to find the correct tcomma
        idx = tlist.token_index(tcomma)
        before = tlist.token_prev(idx)
        after = tlist.token_next(idx)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(idx + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            after_idx = tlist.token_index(after, start=idx)
            next_ = tlist.token_next(after_idx)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_ 
Example #18
Source File: grouping.py    From codenn with MIT License
def group_identifier_list(tlist):
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    idx = 0
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_ 
Example #19
Source File: operators.py    From djongo with GNU Affero General Public License v3.0
def _token2op(self,
                  tok: Token,
                  statement: SQLStatement) -> '_Op':
        op = None
        kw = {'statement': statement, 'query': self.query}
        if tok.match(tokens.Keyword, 'AND'):
            op = AndOp(**kw)

        elif tok.match(tokens.Keyword, 'OR'):
            op = OrOp(**kw)

        elif tok.match(tokens.Keyword, 'IN'):
            op = InOp(**kw)

        elif tok.match(tokens.Keyword, 'NOT'):
            if statement.next_token.match(tokens.Keyword, 'IN'):
                op = NotInOp(**kw)
                statement.skip(1)
            else:
                op = NotOp(**kw)

        elif tok.match(tokens.Keyword, 'LIKE'):
            op = LikeOp(**kw)

        elif tok.match(tokens.Keyword, 'iLIKE'):
            op = iLikeOp(**kw)

        elif tok.match(tokens.Keyword, 'BETWEEN'):
            op = BetweenOp(**kw)
            statement.skip(3)

        elif tok.match(tokens.Keyword, 'IS'):
            op = IsOp(**kw)

        elif isinstance(tok, Comparison):
            op = CmpOp(tok, self.query)

        elif isinstance(tok, Parenthesis):
            if (tok[1].match(tokens.Name.Placeholder, '.*', regex=True)
                    or tok[1].match(tokens.Keyword, 'Null')
                    or isinstance(tok[1], IdentifierList)
                    or tok[1].ttype == tokens.DML
            ):
                pass
            else:
                op = ParenthesisOp(SQLStatement(tok), self.query)

        elif tok.match(tokens.Punctuation, (')', '(')):
            pass

        elif isinstance(tok, Identifier):
            pass
        else:
            raise SQLDecodeError

        return op
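
Finally, a hedged sketch (plain sqlparse, outside djongo's SQLStatement wrapper) of the token kinds this dispatcher sees when walking a WHERE clause; the branch labels mirror the isinstance/match checks above:

import sqlparse
from sqlparse import sql, tokens

where = sqlparse.parse("SELECT * FROM t WHERE a = %s AND b IN (1, 2)")[0].tokens[-1]
for tok in where.tokens:
    if tok.match(tokens.Keyword, 'AND'):
        print('Keyword AND ->', repr(tok.value))
    elif tok.match(tokens.Keyword, 'IN'):
        print('Keyword IN  ->', repr(tok.value))
    elif isinstance(tok, sql.Comparison):
        print('Comparison  ->', repr(tok.value))
    elif isinstance(tok, sql.Identifier):
        print('Identifier  ->', repr(tok.value))
    elif isinstance(tok, sql.Parenthesis):
        print('Parenthesis ->', repr(tok.value))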