Python sqlparse.tokens Examples

The following are 30 code examples of the sqlparse.tokens module, each taken from an open-source project. The project and source file for every example are noted above it. You may also want to check out all available functions and classes of the sqlparse module.
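As a quick orientation before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of how sqlparse.tokens ttypes are usually compared with the is operator:

import sqlparse
from sqlparse.tokens import DML, Keyword, Whitespace

statement = sqlparse.parse("SELECT id FROM users WHERE id > 10")[0]
for token in statement.flatten():
    if token.ttype is DML:
        print("DML keyword:", token.value)    # SELECT
    elif token.ttype is Keyword:
        print("Keyword:", token.value)        # FROM, WHERE
    elif token.ttype is Whitespace:
        pass                                  # whitespace tokens are usually filtered out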
Example #1
Source File: utils.py    From pgcli with BSD 3-Clause "New" or "Revised" License
def parse_partial_identifier(word):
    """Attempt to parse a (partially typed) word as an identifier

    word may include a schema qualification, like `schema_name.partial_name`
    or `schema_name.` There may also be unclosed quotation marks, like
    `"schema`, or `schema."partial_name`

    :param word: string representing a (partially complete) identifier
    :return: sqlparse.sql.Identifier, or None
    """

    p = sqlparse.parse(word)[0]
    n_tok = len(p.tokens)
    if n_tok == 1 and isinstance(p.tokens[0], Identifier):
        return p.tokens[0]
    elif p.token_next_by(m=(Error, '"'))[1]:
        # An unmatched double quote, e.g. '"foo', 'foo."', or 'foo."bar'
        # Close the double quote, then reparse
        return parse_partial_identifier(word + '"')
    else:
        return None 
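A brief usage sketch for the helper above (hypothetical calls, assuming the function and its Identifier and Error imports from sqlparse are in scope):

print(parse_partial_identifier("schema_name.partial_name"))  # an sqlparse.sql.Identifier
ident = parse_partial_identifier('foo."bar')        # an unmatched quote is closed and the word reparsed
print(ident.value if ident is not None else None)   # e.g. 'foo."bar"'
print(parse_partial_identifier("select *"))          # None -- not a single identifier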
Example #2
Source File: convert.py    From sqlitis with MIT License
def build_comparison(token):
    assert type(token) is S.Comparison

    m = M.Comparison()
    for tok in remove_whitespace(token.tokens):
        LOG.debug("  %s %s", tok, type(tok))
        if type(tok) is S.Parenthesis:
            subtokens = remove_whitespace(tok.tokens)
            subquery = tokens_to_sqla(subtokens[1:-1])
            if not m.left:
                m.left = subquery
            else:
                m.right = subquery
        else:
            m = sql_literal_to_model(tok, m)
            if not m:
                raise Exception("[BUG] Failed to convert %s to model" % tok)

    return m 
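For context, a small standalone sketch (independent of sqlitis) showing where the sqlparse.sql.Comparison group that build_comparison consumes comes from:

import sqlparse
from sqlparse import sql as S

parsed = sqlparse.parse("SELECT * FROM t WHERE a = 1")[0]
where = next(tok for tok in parsed.tokens if isinstance(tok, S.Where))
comparison = next(tok for tok in where.tokens if isinstance(tok, S.Comparison))
print(type(comparison).__name__, repr(comparison.value))  # Comparison 'a = 1'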
Example #3
Source File: test_tokenize.py    From codenn with MIT License
def test_simple(self):
        from cStringIO import StringIO

        stream = StringIO("SELECT 1; SELECT 2;")
        lex = lexer.Lexer()

        tokens = lex.get_tokens(stream)
        self.assertEqual(len(list(tokens)), 9)

        stream.seek(0)
        lex.bufsize = 4
        tokens = list(lex.get_tokens(stream))
        self.assertEqual(len(tokens), 9)

        stream.seek(0)
        lex.bufsize = len(stream.getvalue())
        tokens = list(lex.get_tokens(stream))
        self.assertEqual(len(tokens), 9) 
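The test above targets codenn's bundled lexer; with the current top-level sqlparse API, a roughly equivalent count looks like this (a sketch -- exact token counts can vary between sqlparse versions):

import sqlparse

statements = sqlparse.parse("SELECT 1; SELECT 2;")
tokens = [tok for stmt in statements for tok in stmt.flatten()]
print(len(tokens))  # 9 here: SELECT, ' ', 1, ';' per statement, plus the space between statements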
Example #4
Source File: sql_parse.py    From incubator-superset with Apache License 2.0
def _process_tokenlist(self, token_list: TokenList) -> None:
        """
        Add table names to table set

        :param token_list: TokenList to be processed
        """
        # exclude subselects
        if "(" not in str(token_list):
            table = self._get_table(token_list)
            if table and not table.table.startswith(CTE_PREFIX):
                self._tables.add(table)
            return

        # store aliases
        if token_list.has_alias():
            self._alias_names.add(token_list.get_alias())

        # some aliases are not parsed properly
        if token_list.tokens[0].ttype == Name:
            self._alias_names.add(token_list.tokens[0].value)
        self._extract_from_token(token_list) 
Example #5
Source File: sql_parse.py    From incubator-superset with Apache License 2.0
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
    """
    Extract limit clause from SQL statement.

    :param statement: SQL statement
    :return: Limit extracted from query, None if no limit present in statement
    """
    idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
    if idx is not None:
        _, token = statement.token_next(idx=idx)
        if token:
            if isinstance(token, IdentifierList):
                # In case of "LIMIT <offset>, <limit>", find comma and extract
                # first succeeding non-whitespace token
                idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
                _, token = token.token_next(idx=idx)
            if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
                return int(token.value)
    return None 
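A standalone sketch of the token_next_by / token_next calls the helper above relies on, assuming (as the helper does) that sqlparse groups the offset/limit pair into an IdentifierList:

import sqlparse
from sqlparse.tokens import Keyword

statement = sqlparse.parse("SELECT * FROM t LIMIT 10, 100")[0]
idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
_, token = statement.token_next(idx=idx)
print(token.value)  # '10, 100' -- the helper then walks past the comma and returns 100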
Example #6
Source File: search_utils.py    From mlflow with Apache License 2.0
def _validate_order_by_and_generate_token(cls, order_by):
        try:
            parsed = sqlparse.parse(order_by)
        except Exception:
            raise MlflowException(f"Error on parsing order_by clause '{order_by}'",
                                  error_code=INVALID_PARAMETER_VALUE)
        if len(parsed) != 1 or not isinstance(parsed[0], Statement):
            raise MlflowException(f"Invalid order_by clause '{order_by}'. Could not be parsed.",
                                  error_code=INVALID_PARAMETER_VALUE)
        statement = parsed[0]
        if len(statement.tokens) == 1 and isinstance(statement[0], Identifier):
            token_value = statement.tokens[0].value
        elif len(statement.tokens) == 1 and \
                statement.tokens[0].match(ttype=TokenType.Keyword,
                                          values=[cls.ORDER_BY_KEY_TIMESTAMP]):
            token_value = cls.ORDER_BY_KEY_TIMESTAMP
        elif statement.tokens[0].match(ttype=TokenType.Keyword,
                                       values=[cls.ORDER_BY_KEY_TIMESTAMP])\
                and all([token.is_whitespace for token in statement.tokens[1:-1]])\
                and statement.tokens[-1].ttype == TokenType.Keyword.Order:
            token_value = cls.ORDER_BY_KEY_TIMESTAMP + ' ' + statement.tokens[-1].value
        else:
            raise MlflowException(f"Invalid order_by clause '{order_by}'. Could not be parsed.",
                                  error_code=INVALID_PARAMETER_VALUE)
        return token_value 
Example #7
Source File: utils.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def parse_partial_identifier(word):
    """Attempt to parse a (partially typed) word as an identifier

    word may include a schema qualification, like `schema_name.partial_name`
    or `schema_name.` There may also be unclosed quotation marks, like
    `"schema`, or `schema."partial_name`

    :param word: string representing a (partially complete) identifier
    :return: sqlparse.sql.Identifier, or None
    """

    p = sqlparse.parse(word)[0]
    n_tok = len(p.tokens)
    if n_tok == 1 and isinstance(p.tokens[0], Identifier):
        return p.tokens[0]
    if p.token_next_by(m=(Error, '"'))[1]:
        # An unmatched double quote, e.g. '"foo', 'foo."', or 'foo."bar'
        # Close the double quote, then reparse
        return parse_partial_identifier(word + '"')
    return None 
Example #8
Source File: parseutils.py    From athenacli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT',
                'UPDATE', 'CREATE', 'DELETE'):
            return True
    return False 
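A usage sketch (hypothetical input, assuming is_subselect and its DML import are in scope):

import sqlparse

parsed = sqlparse.parse("SELECT * FROM (SELECT id FROM users)")[0]
paren = parsed.tokens[-1]              # the grouped Parenthesis after FROM
print(is_subselect(paren))             # True -- it contains a top-level DML 'SELECT'
print(is_subselect(parsed.tokens[0]))  # False -- a plain keyword token is not a group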
Example #9
Source File: parseutils.py    From athenacli with BSD 3-Clause "New" or "Revised" License
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # StopIteration. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and (
                    not item.value.upper().endswith('JOIN')):
                return
            else:
                yield item
        elif ((item.ttype is Keyword or item.ttype is Keyword.DML) and
                item.value.upper() in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE', 'JOIN',)):
            tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break 
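A usage sketch (hypothetical query, assuming extract_from_part, is_subselect, and their imports are in scope); the generator yields the tokens between FROM/JOIN keywords and the next clause:

import sqlparse

parsed = sqlparse.parse("SELECT a.x, b.y FROM abc a JOIN def b ON a.id = b.id")[0]
from_part = [tok.value for tok in extract_from_part(parsed) if not tok.is_whitespace]
print(from_part)  # roughly ['abc a', 'JOIN', 'def b'] -- extraction stops at the ON keyword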
Example #10
Source File: parseutils.py    From athenacli with BSD 3-Clause "New" or "Revised" License
def find_prev_keyword(sql):
    """ Find the last sql keyword in an SQL statement
    Returns the value of the last keyword, and the text of the query with
    everything after the last keyword stripped
    """
    if not sql.strip():
        return None, ''

    parsed = sqlparse.parse(sql)[0]
    flattened = list(parsed.flatten())

    logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')

    for t in reversed(flattened):
        if t.value == '(' or (t.is_keyword and (
                              t.value.upper() not in logical_operators)):
            # Find the location of token t in the original parsed statement
            # We can't use parsed.token_index(t) because t may be a child token
            # inside a TokenList, in which case token_index throws an error
            # Minimal example:
            #   p = sqlparse.parse('select * from foo where bar')[0]
            #   t = list(p.flatten())[-3]  # The "Where" token
            #   p.token_index(t)  # Throws ValueError: not in list
            idx = flattened.index(t)

            # Combine the string values of all tokens in the original list
            # up to and including the target keyword token t, to produce a
            # query string with everything after the keyword token removed
            text = ''.join(tok.value for tok in flattened[:idx+1])
            return t, text

    return None, '' 
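A usage sketch (assuming find_prev_keyword and its sqlparse import are in scope):

token, text = find_prev_keyword("SELECT * FROM users WHERE ")
print(token.value, repr(text))  # WHERE 'SELECT * FROM users WHERE'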
Example #11
Source File: search_utils.py    From mlflow with Apache License 2.0
def _validate_comparison(cls, tokens):
        base_error_string = "Invalid comparison clause"
        if len(tokens) != 3:
            raise MlflowException("{}. Expected 3 tokens found {}".format(base_error_string,
                                                                          len(tokens)),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[0], Identifier):
            raise MlflowException("{}. Expected 'Identifier' found '{}'".format(base_error_string,
                                                                                str(tokens[0])),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[1], Token) and tokens[1].ttype != TokenType.Operator.Comparison:
            raise MlflowException("{}. Expected comparison found '{}'".format(base_error_string,
                                                                              str(tokens[1])),
                                  error_code=INVALID_PARAMETER_VALUE)
        if not isinstance(tokens[2], Token) and \
                (tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES) or
                 isinstance(tokens[2], Identifier)):
            raise MlflowException("{}. Expected value token found '{}'".format(base_error_string,
                                                                               str(tokens[2])),
                                  error_code=INVALID_PARAMETER_VALUE) 
Example #12
Source File: search_utils.py    From mlflow with Apache License 2.0
def _get_comparison(cls, comparison):
        stripped_comparison = [token for token in comparison.tokens if not token.is_whitespace]
        cls._validate_comparison(stripped_comparison)
        comp = cls._get_identifier(stripped_comparison[0].value, cls.VALID_SEARCH_ATTRIBUTE_KEYS)
        comp["comparator"] = stripped_comparison[1].value
        comp["value"] = cls._get_value(comp.get("type"), stripped_comparison[2])
        return comp 
Example #13
Source File: search_utils.py    From mlflow with Apache License 2.0
def _process_statement(cls, statement):
        # check validity
        invalids = list(filter(cls._invalid_statement_token, statement.tokens))
        if len(invalids) > 0:
            invalid_clauses = ", ".join("'%s'" % token for token in invalids)
            raise MlflowException("Invalid clause(s) in filter string: %s" % invalid_clauses,
                                  error_code=INVALID_PARAMETER_VALUE)
        return [cls._get_comparison(si) for si in statement.tokens if isinstance(si, Comparison)] 
Example #14
Source File: search_utils.py    From mlflow with Apache License 2.0
def _parse_order_by_string(cls, order_by):
        token_value = cls._validate_order_by_and_generate_token(order_by)
        is_ascending = True
        tokens = token_value.split()
        if len(tokens) > 2:
            raise MlflowException(f"Invalid order_by clause '{order_by}'. Could not be parsed.",
                                  error_code=INVALID_PARAMETER_VALUE)
        elif len(tokens) == 2:
            order_token = tokens[1].lower()
            if order_token not in cls.VALID_ORDER_BY_TAGS:
                raise MlflowException(f"Invalid ordering key in order_by clause '{order_by}'.",
                                      error_code=INVALID_PARAMETER_VALUE)
            is_ascending = (order_token == cls.ASC_OPERATOR)
            token_value = tokens[0]
        return token_value, is_ascending 
Example #15
Source File: search_utils.py    From mlflow with Apache License 2.0
def _parse_filter_for_model_registry(cls, filter_string, valid_search_keys):
        if not filter_string or filter_string == "":
            return []
        expected = "Expected search filter with single comparison operator. e.g. name='myModelName'"
        try:
            parsed = sqlparse.parse(filter_string)
        except Exception:
            raise MlflowException("Error while parsing filter '%s'. %s" % (filter_string, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        if len(parsed) == 0 or not isinstance(parsed[0], Statement):
            raise MlflowException("Invalid filter '%s'. Could not be parsed. %s" %
                                  (filter_string, expected), error_code=INVALID_PARAMETER_VALUE)
        elif len(parsed) > 1:
            raise MlflowException("Search filter '%s' contains multiple expressions. "
                                  "%s " % (filter_string, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        statement = parsed[0]
        invalids = list(filter(cls._invalid_statement_token, statement.tokens))
        if len(invalids) > 0:
            invalid_clauses = ", ".join("'%s'" % token for token in invalids)
            raise MlflowException("Invalid clause(s) in filter string: %s. "
                                  "%s" % (invalid_clauses, expected),
                                  error_code=INVALID_PARAMETER_VALUE)
        return [cls._get_comparison_for_model_registry(
            si,
            valid_search_keys)
            for si in statement.tokens if isinstance(si, Comparison)] 
Example #16
Source File: utils.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def find_prev_keyword(sql, n_skip=0):
    """ Find the last sql keyword in an SQL statement

    Returns the value of the last keyword, and the text of the query with
    everything after the last keyword stripped
    """
    if not sql.strip():
        return None, ''

    parsed = sqlparse.parse(sql)[0]
    flattened = list(parsed.flatten())
    flattened = flattened[:len(flattened) - n_skip]

    logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')

    for t in reversed(flattened):
        if t.value == '(' or (t.is_keyword and
                              (t.value.upper() not in logical_operators)
                             ):
            # Find the location of token t in the original parsed statement
            # We can't use parsed.token_index(t) because t may be a child token
            # inside a TokenList, in which case token_index throws an error
            # Minimal example:
            #   p = sqlparse.parse('select * from foo where bar')[0]
            #   t = list(p.flatten())[-3]  # The "Where" token
            #   p.token_index(t)  # Throws ValueError: not in list
            idx = flattened.index(t)

            # Combine the string values of all tokens in the original list
            # up to and including the target keyword token t, to produce a
            # query string with everything after the keyword token removed
            text = ''.join(tok.value for tok in flattened[:idx + 1])
            return t, text

    return None, ''
Example #17
Source File: tables.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT',
                                                        'UPDATE', 'CREATE', 'DELETE'):
            return True
    return False 
Example #18
Source File: tables.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def _identifier_is_function(identifier):
    return any(isinstance(t, Function) for t in identifier.tokens) 
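A usage sketch (hypothetical query, assuming _identifier_is_function and its Function import are in scope); a table-valued function call in a FROM clause is typically wrapped in an Identifier that contains a Function group:

import sqlparse

parsed = sqlparse.parse("SELECT * FROM generate_series(1, 3) AS g")[0]
ident = parsed.tokens[-1]               # Identifier wrapping the function call and its alias
print(type(ident).__name__)             # Identifier
print(_identifier_is_function(ident))   # True -- one of its child tokens is a Function group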
Example #19
Source File: tables.py    From mssql-cli with BSD 3-Clause "New" or "Revised" License
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition and cut the
            # extraction short. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and \
                    (not item.value.upper().endswith('JOIN')):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if (item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') or
                    item_val.endswith('JOIN')):
                tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break 
Example #20
Source File: sql_parse.py    From incubator-superset with Apache License 2.0
def _get_table(tlist: TokenList) -> Optional[Table]:
        """
        Return the table if valid, i.e., conforms to the [[catalog.]schema.]table
        construct.

        :param tlist: The SQL tokens
        :returns: The table if the name conforms
        """

        # Strip the alias if present.
        idx = len(tlist.tokens)

        if tlist.has_alias():
            ws_idx, _ = tlist.token_next_by(t=Whitespace)

            if ws_idx != -1:
                idx = ws_idx

        tokens = tlist.tokens[:idx]

        if (
            len(tokens) in (1, 3, 5)
            and all(imt(token, t=[Name, String]) for token in tokens[::2])
            and all(imt(token, m=(Punctuation, ".")) for token in tokens[1::2])
        ):
            return Table(*[remove_quotes(token.value) for token in tokens[::-2]])

        return None 
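For reference, a standalone sketch of the token pattern the check above looks for: alternating Name and '.' Punctuation tokens inside a dotted Identifier:

import sqlparse

ident = sqlparse.parse("catalog.schema.tbl")[0].tokens[0]
print(type(ident).__name__)  # Identifier
print([(tok.ttype, tok.value) for tok in ident.tokens])
# [(Token.Name, 'catalog'), (Token.Punctuation, '.'), (Token.Name, 'schema'),
#  (Token.Punctuation, '.'), (Token.Name, 'tbl')]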
Example #21
Source File: sql_parse.py    From incubator-superset with Apache License 2.0
def set_or_update_query_limit(self, new_limit: int) -> str:
        """Returns the query with the specified limit.

        Does not change the underlying query if user did not apply the limit,
        otherwise replaces the limit with the lower value between existing limit
        in the query and new_limit.

        :param new_limit: Limit to be incorporated into returned query
        :return: The original query with new limit
        """
        if not self._limit:
            return f"{self.stripped()}\nLIMIT {new_limit}"
        limit_pos = None
        statement = self._parsed[0]
        # Add all items to before_str until there is a limit
        for pos, item in enumerate(statement.tokens):
            if item.ttype in Keyword and item.value.lower() == "limit":
                limit_pos = pos
                break
        _, limit = statement.token_next(idx=limit_pos)
        # Override the limit only when it exceeds the configured value.
        if limit.ttype == sqlparse.tokens.Literal.Number.Integer and new_limit < int(
            limit.value
        ):
            limit.value = new_limit
        elif limit.is_group:
            limit.value = f"{next(limit.get_identifiers())}, {new_limit}"

        str_res = ""
        for i in statement.tokens:
            str_res += str(i.value)
        return str_res 
Example #22
Source File: sql_metadata.py    From sql-metadata with MIT License
def get_query_tokens(query: str) -> List[sqlparse.sql.Token]:
    """
    :type query str
    :rtype: list[sqlparse.sql.Token]
    """
    query = preprocess_query(query)
    parsed = sqlparse.parse(query)

    # handle empty queries (#12)
    if not parsed:
        return []

    tokens = TokenList(parsed[0].tokens).flatten()
    # print([(token.value, token.ttype) for token in tokens])

    return [token for token in tokens if token.ttype is not Whitespace] 
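A standalone sketch of the flatten-and-filter step (preprocess_query is sql-metadata's own helper, so it is omitted here):

import sqlparse
from sqlparse.sql import TokenList
from sqlparse.tokens import Whitespace

parsed = sqlparse.parse("SELECT id FROM users")
tokens = TokenList(parsed[0].tokens).flatten()
print([tok.value for tok in tokens if tok.ttype is not Whitespace])
# ['SELECT', 'id', 'FROM', 'users']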
Example #23
Source File: extract_tables.py    From Archery with Apache License 2.0
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if (
            item.ttype is DML
            and item.value.upper() in ("SELECT", "INSERT", "UPDATE", "CREATE", "DELETE")
        ):
            return True
    return False 
Example #24
Source File: extract_tables.py    From Archery with Apache License 2.0
def _identifier_is_function(identifier):
    return any(isinstance(t, Function) for t in identifier.tokens) 
Example #25
Source File: extract_tables.py    From Archery with Apache License 2.0
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                # PEP 479: raising StopIteration inside a generator becomes a
                # RuntimeError on Python 3.7+, so return instead.
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition and stop the
            # extraction early. So we need to ignore the keyword if the keyword
            # is FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (not item.value.upper() == "FROM") and (
                not item.value.upper().endswith("JOIN")
            ):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if (
                item_val in ("COPY", "FROM", "INTO", "UPDATE", "TABLE")
                or item_val.endswith("JOIN")
            ):
                tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if identifier.ttype is Keyword and identifier.value.upper() == "FROM":
                    tbl_prefix_seen = True
                    break 
Example #26
Source File: test_tokenize.py    From codenn with MIT License
def test_parse_endifloop():
    p = sqlparse.parse('END IF')[0]
    assert len(p.tokens) == 1
    assert p.tokens[0].ttype is Keyword
    p = sqlparse.parse('END   IF')[0]
    assert len(p.tokens) == 1
    p = sqlparse.parse('END\t\nIF')[0]
    assert len(p.tokens) == 1
    assert p.tokens[0].ttype is Keyword
    p = sqlparse.parse('END LOOP')[0]
    assert len(p.tokens) == 1
    assert p.tokens[0].ttype is Keyword
    p = sqlparse.parse('END  LOOP')[0]
    assert len(p.tokens) == 1
    assert p.tokens[0].ttype is Keyword 
Example #27
Source File: convert.py    From sqlitis with MIT License
def remove_whitespace(tokens):
    return [x for x in tokens if not x.is_whitespace] 
Example #28
Source File: convert.py    From sqlitis with MIT License
def to_sqla(sql):
    sql = sql.strip()
    if not sql:
        raise Exception("Empty SQL string provided")

    tokens = sqlparse.parse(sql)[0].tokens
    tokens = remove_whitespace(tokens)
    return tokens_to_sqla(tokens).render() 
Example #29
Source File: tables.py    From pgcli with BSD 3-Clause "New" or "Revised" License
def _identifier_is_function(identifier):
    return any(isinstance(t, Function) for t in identifier.tokens) 
Example #30
Source File: parseutils.py    From litecli with BSD 3-Clause "New" or "Revised" License
def is_subselect(parsed):
    if not parsed.is_group:
        return False
    for item in parsed.tokens:
        if item.ttype is DML and item.value.upper() in (
            "SELECT",
            "INSERT",
            "UPDATE",
            "CREATE",
            "DELETE",
        ):
            return True
    return False