Python sqlparse.tokens.DML Examples

The following are 30 code examples of sqlparse.tokens.DML. You can go to the original project or source file noted above each example. You may also want to check out all available functions and classes of the module sqlparse.tokens, or try the search function.
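Before the project examples, here is a minimal sketch (not taken from any of the projects below) of how sqlparse.tokens.DML is typically used: parse a statement and test whether its first token carries the DML token type.

import sqlparse
from sqlparse import tokens

parsed = sqlparse.parse('SELECT id FROM users WHERE id = 1')[0]
first = parsed.token_first()              # first non-whitespace token
print(first.ttype is tokens.DML)          # True -- SELECT is lexed as a DML keyword
print(first.match(tokens.DML, 'SELECT'))  # True -- match on token type and value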
Example #1
Source File: query.py    From djongo with GNU Affero General Public License v3.0
def parse(self):

        statement = SQLStatement(self.statement)

        for tok in statement:
            if tok.match(tokens.DML, 'UPDATE'):
                c = ColumnSelectConverter(self, statement)
                self.left_table = c.sql_tokens[0].table

            elif tok.match(tokens.Keyword, 'SET'):
                c = self.set_columns = SetConverter(self, statement)

            elif isinstance(tok, Where):
                c = self.where = WhereConverter(self, statement)

            else:
                raise SQLDecodeError

        self.kwargs = {}
        if self.where:
            self.kwargs.update(self.where.to_mongo())

        self.kwargs.update(self.set_columns.to_mongo()) 
Example #2
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_dmlddl_parenthesis(token):
    """
        Determine whether the parenthesis wraps a DML or DDL statement
    """
    if not is_parenthesis(token):
        return False

    open_punc = token.token_next_match(0, T.Punctuation, '(')
    first = token_next_enable(token, open_punc)
    if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
        return True

    if is_with(first):
        return True

    if is_parenthesis(first):
        return is_dmlddl_parenthesis(first)

    return False 
Example #3
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_where(tlist):
    def end_match(token):
        stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
                     'WHEN', # for Oracle10g merge
                     'CONNECT', # for Oracle connect by
                     )
        if token.match(T.Keyword, stopwords):
            return True
        if token.match(T.DML, ('DELETE')): # for Oracle10g merge
            return True
        if token.match(T.DML, ('START')): # for Oracle connect by
            return True

        return False

    [group_where(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.Where)]
    idx = 0
    token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
    while token:
        tidx = tlist.token_index(token)
        end = tlist.token_matching(tidx + 1, (end_match, ))
        if end is None:
            end = tlist._groupable_tokens[-1]
        else:
            end = tlist.tokens[tlist.token_index(end) - 1]
        group = tlist.group_tokens(sql.Where,
                                   tlist.tokens_between(token, end),
                                   ignore_ws=True)
        idx = tlist.token_index(group)
        token = tlist.token_next_match(idx, T.Keyword, 'WHERE') 
Example #4
Source File: query.py    From djongo with GNU Affero General Public License v3.0
def parse(self):
        statement = SQLStatement(self.statement)

        for tok in statement:
            if tok.match(tokens.DML, 'SELECT'):
                self.selected_columns = ColumnSelectConverter(self, statement)

            elif tok.match(tokens.Keyword, 'FROM'):
                FromConverter(self, statement)

            elif tok.match(tokens.Keyword, 'LIMIT'):
                self.limit = LimitConverter(self, statement)

            elif tok.match(tokens.Keyword, 'ORDER'):
                self.order = OrderConverter(self, statement)

            elif tok.match(tokens.Keyword, 'OFFSET'):
                self.offset = OffsetConverter(self, statement)

            elif tok.match(tokens.Keyword, 'INNER JOIN'):
                converter = InnerJoinConverter(self, statement)
                self.joins.append(converter)

            elif tok.match(tokens.Keyword, 'LEFT OUTER JOIN'):
                converter = OuterJoinConverter(self, statement)
                self.joins.append(converter)

            elif tok.match(tokens.Keyword, 'GROUP'):
                self.groupby = GroupbyConverter(self, statement)

            elif tok.match(tokens.Keyword, 'HAVING'):
                self.having = HavingConverter(self, statement)

            elif isinstance(tok, Where):
                self.where = WhereConverter(self, statement)

            else:
                raise SQLDecodeError(f'Unknown keyword: {tok}') 
Example #5
Source File: operators.py    From djongo with GNU Affero General Public License v3.0
def _fill_in(self, token):
        self._in = []

        # Check for nested
        if token[1].ttype == tokens.DML:
            from .converters import NestedInQueryConverter

            self.query.nested_query = NestedInQueryConverter(token, self.query, 0)
            return

        for index in SQLToken.token2sql(token, self.query):
            if index is not None:
                self._in.append(self.params[index])
            else:
                self._in.append(None) 
Example #6
Source File: tables.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                # An incomplete nested select won't be recognized correctly as a
                # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
                # the second FROM to trigger this elif condition resulting in a
                # StopIteration. So we need to ignore the keyword if the keyword
                # is FROM.
                # Also 'SELECT * FROM abc JOIN def' will trigger this elif
                # condition. So we need to ignore the keyword JOIN and its variants
                # INNER JOIN, FULL OUTER JOIN, etc.
                return
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and \
                    (not item.value.upper().endswith('JOIN')):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if (item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') or
                    item_val.endswith('JOIN')):
                tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break 
Example #7
Source File: tables.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT',
                                                        'UPDATE', 'CREATE', 'DELETE'):
            return True
    return False 
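As a quick illustration (a sketch, not part of mssql-cli, assuming is_subselect and its imports are in scope), the helper returns True for a parenthesized group that wraps a DML statement:

import sqlparse
from sqlparse.sql import Parenthesis

parsed = sqlparse.parse('SELECT * FROM (SELECT id FROM users)')[0]
for item in parsed.tokens:
    if isinstance(item, Parenthesis):
        print(is_subselect(item))  # True -- the group starts with the DML keyword SELECT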
Example #8
Source File: ctes.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def extract_column_names(parsed):
    # Find the first DML token to check if it's a SELECT or INSERT/UPDATE/DELETE
    idx, tok = parsed.token_next_by(t=DML)
    tok_val = tok and tok.value.lower()

    if tok_val in ('insert', 'update', 'delete'):
        # Jump ahead to the RETURNING clause where the list of column names is
        idx, tok = parsed.token_next_by(idx, (Keyword, 'returning'))
    elif not tok_val == 'select':
        # Must be invalid CTE
        return ()

    # The next token should be either a column name, or a list of column names
    idx, tok = parsed.token_next(idx, skip_ws=True, skip_cm=True)
    return tuple(t.get_name() for t in _identifiers(tok)) 
Example #9
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_having(tlist):
    def end_match(token):
        stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
                     'WHEN', # for Oracle10g merge
                     'CONNECT', # for Oracle connect by
                     )
        if token.match(T.Keyword, stopwords):
            return True
        if token.match(T.DML, ('DELETE')): # for Oracle10g merge
            return True
        if token.match(T.DML, ('START')): # for Oracle connect by
            return True

        return False

    def proc(tlist):
        [proc(sgroup) for sgroup in tlist.get_sublists()
         if not isinstance(sgroup, Having)]
        idx = 0
        token = tlist.token_next_match(idx, T.Keyword, 'HAVING')
        while token:
            tidx = tlist.token_index(token)
            end = tlist.token_matching(tidx + 1, (end_match, ))
            if end is None:
                end = tlist._groupable_tokens[-1]
            else:
                end = tlist.tokens[tlist.token_index(end) - 1]
            tgroup = tlist.group_tokens(Having,
                                       tlist.tokens_between(token, end),
                                       ignore_ws=True)
            idx = tlist.token_index(tgroup)
            token = tlist.token_next_match(idx, T.Keyword, 'HAVING')

    proc = SqlFormatterException.to_wrap_try_except(proc, 0)
    proc(tlist) 
Example #10
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_mergeupdateinsertclause(token):
    """
        Determine whether the token is a DML clause inside a MERGE statement
    """
    from uroborosqlfmt.sql import MergeUpdateInsertClause
    return isinstance(token, MergeUpdateInsertClause) 
Example #11
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_dml(token):
    """
        Determine whether the token is a DML keyword
    """
    return token.ttype in T.DML 
Example #12
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_delete_dml(token):
    """
        Determine whether the token is a DELETE clause
    """
    return token.match(T.DML, "DELETE") 
Example #13
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_update_dml(token):
    """
        Determine whether the token is an UPDATE clause
    """
    return token.match(T.DML, "UPDATE") 
Example #14
Source File: tokenutils.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def is_select_dml(token):
    """
        Determine whether the token is a SELECT clause
    """
    return token.match(T.DML, "SELECT") 
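A minimal sketch (using only the public sqlparse API, not the uroboroSQL-formatter helpers themselves) of what these predicates check on the leading token of a statement:

import sqlparse
from sqlparse import tokens as T

tok = sqlparse.parse('UPDATE users SET name = ?')[0].token_first()
print(tok.match(T.DML, 'UPDATE'))  # True  -- the check is_update_dml() performs
print(tok.match(T.DML, 'SELECT'))  # False
print(tok.ttype in T.DML)          # True  -- the membership test used by is_dml()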
Example #15
Source File: ctes.py    From pgcli with BSD 3-Clause "New" or "Revised" License
def extract_column_names(parsed):
    # Find the first DML token to check if it's a SELECT or INSERT/UPDATE/DELETE
    idx, tok = parsed.token_next_by(t=DML)
    tok_val = tok and tok.value.lower()

    if tok_val in ("insert", "update", "delete"):
        # Jump ahead to the RETURNING clause where the list of column names is
        idx, tok = parsed.token_next_by(idx, (Keyword, "returning"))
    elif not tok_val == "select":
        # Must be invalid CTE
        return ()

    # The next token should be either a column name, or a list of column names
    idx, tok = parsed.token_next(idx, skip_ws=True, skip_cm=True)
    return tuple(t.get_name() for t in _identifiers(tok)) 
Example #16
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_as(tlist):

    def _right_valid(token):
        # Currently limited to DML/DDL. Maybe more non-SQL reserved
        # keywords should appear here (see issue8).
        return not token.ttype in (T.DML, T.DDL)

    def _left_valid(token):
        if token.ttype is T.Keyword and token.value in ('NULL',):
            return True
        return token.ttype is not T.Keyword

    _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
                      check_right=_right_valid,
                      check_left=_left_valid) 
Example #17
Source File: parseutils.py    From athenacli with BSD 3-Clause "New" or "Revised" License
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # StopIteration. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and (
                    not item.value.upper().endswith('JOIN')):
                return
            else:
                yield item
        elif ((item.ttype is Keyword or item.ttype is Keyword.DML) and
                item.value.upper() in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE', 'JOIN',)):
            tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break 
Example #18
Source File: parseutils.py    From athenacli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT',
                'UPDATE', 'CREATE', 'DELETE'):
            return True
    return False 
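For illustration, a hedged sketch of how the two athenacli helpers above fit together, assuming both functions and their imports (Keyword, DML, Punctuation from sqlparse.tokens; IdentifierList from sqlparse.sql) are defined in the same module:

import sqlparse

sql = 'SELECT u.id, o.total FROM users u INNER JOIN orders o ON u.id = o.user_id'
parsed = sqlparse.parse(sql)[0]
# extract_from_part yields the tokens after FROM/JOIN that can name tables,
# stopping at ON; whitespace tokens are yielded too, so filter them out.
for tok in extract_from_part(parsed):
    if str(tok).strip():
        print(str(tok))  # prints: users u / INNER JOIN / orders o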
Example #19
Source File: test_grouping.py    From codenn with MIT License
def test_identifiers(self):
        s = 'select foo.bar from "myscheme"."table" where fail. order'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[2], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[6], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[8], sql.Where))
        s = 'select * from foo where foo.id = 1'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[-1].tokens[-1].tokens[0],
                                sql.Identifier))
        s = 'select * from (select "foo"."id" from foo)'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[-1].tokens[3], sql.Identifier))

        s = "INSERT INTO `test` VALUES('foo', 'bar');"
        parsed = sqlparse.parse(s)[0]
        types = [l.ttype for l in parsed.tokens if not l.is_whitespace()]
        self.assertEquals(types, [T.DML, T.Keyword, None,
                                  T.Keyword, None, T.Punctuation])

        s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 7)
        self.assert_(isinstance(parsed.tokens[2], sql.IdentifierList))
        self.assertEqual(len(parsed.tokens[2].tokens), 4)
        identifiers = list(parsed.tokens[2].get_identifiers())
        self.assertEqual(len(identifiers), 2)
        self.assertEquals(identifiers[0].get_alias(), u"col") 
Example #20
Source File: grouping.py    From codenn with MIT License
def group_as(tlist):

    def _right_valid(token):
        # Currently limited to DML/DDL. Maybe more non-SQL reserved
        # keywords should appear here (see issue8).
        return not token.ttype in (T.DML, T.DDL)

    def _left_valid(token):
        if token.ttype is T.Keyword and token.value in ('NULL',):
            return True
        return token.ttype is not T.Keyword

    _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
                      check_right=_right_valid,
                      check_left=_left_valid) 
Example #21
Source File: extract_table_names.py    From codenn with MIT License
def is_subselect(parsed):
    if not parsed.is_group():
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() == 'SELECT':
            return True
    return False 
Example #22
Source File: aligned_indent.py    From SublimeText-SQLTools with GNU General Public License v3.0
def _process_parenthesis(self, tlist):
        # if this isn't a subquery, don't re-indent
        _, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
        if token is not None:
            with indent(self):
                tlist.insert_after(tlist[0], self.nl('SELECT'))
                # process the inside of the parentheses
                self._process_default(tlist)

            # de-indent last parenthesis
            tlist.insert_before(tlist[-1], self.nl()) 
Example #23
Source File: grouping.py    From SublimeText-SQLTools with GNU General Public License v3.0
def group_as(tlist):
    def match(token):
        return token.is_keyword and token.normalized == 'AS'

    def valid_prev(token):
        return token.normalized == 'NULL' or not token.is_keyword

    def valid_next(token):
        ttypes = T.DML, T.DDL
        return not imt(token, t=ttypes) and token is not None

    def post(tlist, pidx, tidx, nidx):
        return pidx, nidx

    _group(tlist, sql.Identifier, match, valid_prev, valid_next, post) 
Example #24
Source File: ParseUtils.py    From SublimeText-SQLTools with GNU General Public License v3.0
def _extract_from_part(parsed):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if item.is_group:
            for x in _extract_from_part(item):
                yield x
        if tbl_prefix_seen:
            if _is_subselect(item):
                for x in _extract_from_part(item):
                    yield x
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # StopIteration. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and (
                    not item.value.upper().endswith('JOIN')):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if (item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') or
                    item_val.endswith('JOIN')):
                tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break 
Example #25
Source File: ParseUtils.py    From SublimeText-SQLTools with GNU General Public License v3.0
def _is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT',
                                                        'UPDATE', 'CREATE', 'DELETE'):
            return True
    return False 
Example #26
Source File: parseutils.py    From litecli with BSD 3-Clause "New" or "Revised" License
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # `return`. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif (
                item.ttype is Keyword
                and (not item.value.upper() == "FROM")
                and (not item.value.upper().endswith("JOIN"))
            ):
                return
            else:
                yield item
        elif (
            item.ttype is Keyword or item.ttype is Keyword.DML
        ) and item.value.upper() in ("COPY", "FROM", "INTO", "UPDATE", "TABLE", "JOIN"):
            tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if identifier.ttype is Keyword and identifier.value.upper() == "FROM":
                    tbl_prefix_seen = True
                    break 
Example #27
Source File: parseutils.py    From litecli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in (
            "SELECT",
            "INSERT",
            "UPDATE",
            "CREATE",
            "DELETE",
        ):
            return True
    return False 
Example #28
Source File: tables.py    From pgcli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in (
            "SELECT",
            "INSERT",
            "UPDATE",
            "CREATE",
            "DELETE",
        ):
            return True
    return False 
Example #29
Source File: grouping.py    From uroboroSQL-formatter with BSD 3-Clause "New" or "Revised" License
def group_connectby_startwith(tlist):
    def start_match(token):
        if tu.is_phrase(token):
            return token.match_phrase(("CONNECT", "BY")) or token.match_phrase(("START", "WITH"))

    def end_match(token):
        stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
                     'WHEN', # for Oracle10g merge
                     'CONNECT', # for Oracle connect by
                     )
        if token.match(T.Keyword, stopwords):
            return True
        if token.match(T.DML, ('DELETE')): # for Oracle10g merge
            return True
        if token.match(T.DML, ('START')): # for Oracle connect by
            return True

        if tu.is_phrase(token):
            if token.match_phrase(("CONNECT", "BY")) or token.match_phrase(("START", "WITH")):
                return True

        return False

    def proc(tlist):
        [proc(sgroup) for sgroup in tlist.get_sublists()
         if (not isinstance(sgroup, ConnectBy)) and (not isinstance(sgroup, StartWith))]
        idx = 0
        token = tlist.token_matching(idx, (start_match, ))
        while token:
            tidx = tlist.token_index(token)
            end = tlist.token_matching(tidx + 1, (end_match, ))
            if end is None:
                end = tlist._groupable_tokens[-1]
            else:
                end = tlist.tokens[tlist.token_index(end) - 1]

            group_class = None
            if token.match_phrase(("CONNECT", "BY")):
                group_class = ConnectBy
            elif token.match_phrase(("START", "WITH")):
                group_class = StartWith
            tgroup = tlist.group_tokens(group_class,
                                       tlist.tokens_between(token, end),
                                       ignore_ws=True)

            idx = tlist.token_index(tgroup)
            token = tlist.token_matching(idx, (start_match, ))

    proc = SqlFormatterException.to_wrap_try_except(proc, 0)
    proc(tlist) 
Example #30
Source File: operators.py    From djongo with GNU Affero General Public License v3.0
def _token2op(self,
                  tok: Token,
                  statement: SQLStatement) -> '_Op':
        op = None
        kw = {'statement': statement, 'query': self.query}
        if tok.match(tokens.Keyword, 'AND'):
            op = AndOp(**kw)

        elif tok.match(tokens.Keyword, 'OR'):
            op = OrOp(**kw)

        elif tok.match(tokens.Keyword, 'IN'):
            op = InOp(**kw)

        elif tok.match(tokens.Keyword, 'NOT'):
            if statement.next_token.match(tokens.Keyword, 'IN'):
                op = NotInOp(**kw)
                statement.skip(1)
            else:
                op = NotOp(**kw)

        elif tok.match(tokens.Keyword, 'LIKE'):
            op = LikeOp(**kw)

        elif tok.match(tokens.Keyword, 'iLIKE'):
            op = iLikeOp(**kw)

        elif tok.match(tokens.Keyword, 'BETWEEN'):
            op = BetweenOp(**kw)
            statement.skip(3)

        elif tok.match(tokens.Keyword, 'IS'):
            op = IsOp(**kw)

        elif isinstance(tok, Comparison):
            op = CmpOp(tok, self.query)

        elif isinstance(tok, Parenthesis):
            if (tok[1].match(tokens.Name.Placeholder, '.*', regex=True)
                    or tok[1].match(tokens.Keyword, 'Null')
                    or isinstance(tok[1], IdentifierList)
                    or tok[1].ttype == tokens.DML
            ):
                pass
            else:
                op = ParenthesisOp(SQLStatement(tok), self.query)

        elif tok.match(tokens.Punctuation, (')', '(')):
            pass

        elif isinstance(tok, Identifier):
            pass
        else:
            raise SQLDecodeError

        return op