Python token.OP Examples

The following are 28 code examples of token.OP (a token type constant, not a callable). They are drawn from open-source projects; you can go to the original project or source file by following the link above each example. You may also want to check out the other functions and classes of the token module.
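Before the project excerpts, here is a minimal orientation sketch (not taken from any of the projects below) that tokenizes a small expression and prints only the token.OP tokens:

import io
import token
import tokenize

source = "x = (1 + 2) * 3"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # Operators and delimiters are all reported with the generic OP type.
    if tok.type == token.OP:
        print(tok.string, tok.start)
# prints =, (, +, ), * each with its (row, col) start position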
Example #1
Source File: autopep8.py    From filmkodi with Apache License 2.0
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
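Examples #1 through #3 are the same autopep8 helper vendored into different projects. Note that generate_tokens here is autopep8's own module-level wrapper that accepts a whole source string, not tokenize.generate_tokens directly. A minimal usage sketch with a hypothetical stand-in for that wrapper:

import io
import tokenize

def generate_tokens(source):
    # Hypothetical stand-in for autopep8's wrapper: tokenize a full source string.
    return tokenize.generate_tokens(io.StringIO(source).readline)

source_lines = ["result = foo(1,\n",
                "             2)\n",
                "x = 3\n"]
starts, ends = _find_logical(source_lines)
print(starts)  # [(0, 0), (2, 0)] -- the two-line call counts as one logical line
print(ends)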
Example #2
Source File: autopep8.py    From python-netsurv with MIT License
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Example #3
Source File: autopep8.py    From PyDev.Debugger with Eclipse Public License 1.0
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Example #4
Source File: PyToken.py    From flynt with MIT License
def is_exponentiation_op(self):
        return self.toknum == token.OP and self.tokval == "**" 
Example #5
Source File: PyToken.py    From flynt with MIT License
def is_paren_op(self):
        return self.toknum == token.OP and self.tokval == "(" 
Example #6
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def _find_last_in_stmt(self, start_token):
    t = start_token
    while (not util.match_token(t, token.NEWLINE) and
           not util.match_token(t, token.OP, ';') and
           not token.ISEOF(t.type)):
      t = self._code.next_token(t, include_extra=True)
    return self._code.prev_token(t) 
Example #7
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def _expand_to_matching_pairs(self, first_token, last_token, node):
    """
    Scan tokens in [first_token, last_token] range that are between node's children, and for any
    unmatched brackets, adjust first/last tokens to include the closing pair.
    """
    # We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
    # child nodes). If we find any closing ones, we match them to the opens.
    to_match_right = []
    to_match_left = []
    for tok in self._code.token_range(first_token, last_token):
      tok_info = tok[:2]
      if to_match_right and tok_info == to_match_right[-1]:
        to_match_right.pop()
      elif tok_info in _matching_pairs_left:
        to_match_right.append(_matching_pairs_left[tok_info])
      elif tok_info in _matching_pairs_right:
        to_match_left.append(_matching_pairs_right[tok_info])

    # Once done, extend `last_token` to match any unclosed parens/braces.
    for match in reversed(to_match_right):
      last = self._code.next_token(last_token)
      # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter
      while any(util.match_token(last, token.OP, x) for x in (',', ':')):
        last = self._code.next_token(last)
      # Now check for the actual closing delimiter.
      if util.match_token(last, *match):
        last_token = last

    # And extend `first_token` to match any unclosed opening parens/braces.
    for match in to_match_left:
      first = self._code.prev_token(first_token)
      if util.match_token(first, *match):
        first_token = first

    return (first_token, last_token)

  #----------------------------------------------------------------------
  # Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
  # that will actually be assigned. 
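The excerpt above references the module-level dicts _matching_pairs_left and _matching_pairs_right, which are not shown. In asttokens they map an opening bracket token to its expected closing token and vice versa; roughly:

import token

_matching_pairs_left = {
    (token.OP, '('): (token.OP, ')'),
    (token.OP, '['): (token.OP, ']'),
    (token.OP, '{'): (token.OP, '}'),
}
_matching_pairs_right = {
    (token.OP, ')'): (token.OP, '('),
    (token.OP, ']'): (token.OP, '['),
    (token.OP, '}'): (token.OP, '{'),
}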
Example #8
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def handle_comp(self, open_brace, node, first_token, last_token):
    # For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
    # include the opening brace (the closing brace will be matched automatically).
    before = self._code.prev_token(first_token)
    util.expect_token(before, token.OP, open_brace)
    return (before, last_token)

  # Python 3.8 fixed the starting position of list comprehensions:
  # https://bugs.python.org/issue31241 
Example #9
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def handle_attr(self, node, first_token, last_token):
    # Attribute node has ".attr" (2 tokens) after the last child.
    dot = self._code.find_token(last_token, token.OP, '.')
    name = self._code.next_token(dot)
    util.expect_token(name, token.NAME)
    return (first_token, name) 
Example #10
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def handle_def(self, node, first_token, last_token):
    # With astroid, nodes that start with a doc-string can have an empty body, in which case we
    # need to adjust the last token to include the doc string.
    if not node.body and getattr(node, 'doc', None):
      last_token = self._code.find_token(last_token, token.STRING)

    # Include @ from decorator
    if first_token.index > 0:
      prev = self._code.prev_token(first_token)
      if util.match_token(prev, token.OP, '@'):
        first_token = prev
    return (first_token, last_token) 
Example #11
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def visit_call(self, node, first_token, last_token):
    last_token = self.handle_following_brackets(node, last_token, '(')

    # Handling a python bug with decorators with empty parens, e.g.
    # @deco()
    # def ...
    if util.match_token(first_token, token.OP, '@'):
      first_token = self._code.next_token(first_token)
    return (first_token, last_token) 
Example #12
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def handle_bare_tuple(self, node, first_token, last_token):
    # A bare tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
    maybe_comma = self._code.next_token(last_token)
    if util.match_token(maybe_comma, token.OP, ','):
      last_token = maybe_comma
    return (first_token, last_token) 
Example #13
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def _gobble_parens(self, first_token, last_token, include_all=False):
    # Expands a range of tokens to include one or all pairs of surrounding parentheses, and
    # returns (first, last) tokens that include these parens.
    while first_token.index > 0:
      prev = self._code.prev_token(first_token)
      next = self._code.next_token(last_token)
      if util.match_token(prev, token.OP, '(') and util.match_token(next, token.OP, ')'):
        first_token, last_token = prev, next
        if include_all:
          continue
      break
    return (first_token, last_token) 
Example #14
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def handle_num(self, node, value, first_token, last_token):
    # A constant like '-1' gets turned into two tokens; this will skip the '-'.
    while util.match_token(last_token, token.OP):
      last_token = self._code.next_token(last_token)

    if isinstance(value, complex):
      # A complex number like -2j cannot be compared directly to 0
      # A complex number like 1-2j is expressed as a binary operation
      # so we don't need to worry about it
      value = value.imag

    # This makes sure that the - is included
    if value < 0 and first_token.type == token.NUMBER:
        first_token = self._code.prev_token(first_token)
    return (first_token, last_token) 
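The comment about '-1' becoming two tokens is easy to verify with the standard library alone, independent of asttokens:

import io
import token
import tokenize

for tok in tokenize.generate_tokens(io.StringIO("-1").readline):
    if tok.type in (token.OP, token.NUMBER):
        print(token.tok_name[tok.type], repr(tok.string))
# OP '-'
# NUMBER '1'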
Example #15
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def visit_keyword(self, node, first_token, last_token):
    # Until python 3.9 (https://bugs.python.org/issue40141),
    # ast.keyword nodes didn't have line info. Astroid has lineno None.
    if node.arg is not None and getattr(node, 'lineno', None) is None:
      equals = self._code.find_token(first_token, token.OP, '=', reverse=True)
      name = self._code.prev_token(equals)
      util.expect_token(name, token.NAME, node.arg)
      first_token = name
    return (first_token, last_token) 
Example #16
Source File: mark_tokens.py    From asttokens with Apache License 2.0
def visit_assignname(self, node, first_token, last_token):
    # Astroid may turn 'except' clause into AssignName, but we need to adjust it.
    if util.match_token(first_token, token.NAME, 'except'):
      colon = self._code.find_token(last_token, token.OP, ':')
      first_token = last_token = self._code.prev_token(colon)
    return (first_token, last_token) 
Example #17
Source File: test_util.py    From asttokens with Apache License 2.0
def test_expect_token():
  atok = asttokens.ASTTokens("a", parse=True)
  tok = atok.tokens[0]
  with pytest.raises(ValueError):
    asttokens.util.expect_token(tok, token.OP) 
Example #18
Source File: GoogleFinanceQuoteAdapter.py    From paperbroker with MIT License
def fixLazyJson (in_text):
    tokengen = tokenize.generate_tokens(StringIO(in_text.decode('ascii')).readline)

    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        result.append((tokid, tokval))

    return tokenize.untokenize(result) 
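The excerpt assumes import token, import tokenize, and from io import StringIO at module level, and it expects bytes input because of the decode('ascii') call. A hypothetical usage sketch:

import json

lazy = b"{one: 'two', three: ['four',],}"
fixed = fixLazyJson(lazy)
# Unquoted names are quoted, single quotes doubled, trailing commas dropped,
# so the result now parses as strict JSON.
print(json.loads(fixed))  # {'one': 'two', 'three': ['four']}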
Example #19
Source File: __init__.py    From accloudtant with Apache License 2.0
def fix_lazy_json(in_text):
    """
    This function modifies JS-contained JSON to be valid.

    Posted in http://stackoverflow.com/questions/4033633/handling-lazy-json-\
            in-python-expecting-property-name by Pau Sánchez (codigomanso.com)
    """
    tokengen = tokenize.generate_tokens(io.StringIO(in_text).readline)

    valid_tokens = ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']
    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if tokid == token.NAME:
            tokid, tokval = fix_unquoted((tokid, tokval), valid_tokens)

        # fix single-quoted strings
        elif tokid == token.STRING:
            tokval = fix_single_quoted(tokval)

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            result = remove_invalid_commas(result)

        result.append((tokid, tokval))

    return tokenize.untokenize(result) 
Example #20
Source File: internationalizer.py    From zim-desktop-wiki with GNU General Public License v2.0
def tokenize(self):
		translated = []
		untranslated = []
		notsure = []
		tokens = tokenize.generate_tokens(open(self.file).readline)

		reset = lambda: {'funcname': None, 'isfunc': False, 'iskey': False}
		state = reset()

		for type, string, start, end, line in tokens:
			if type == token.STRING:
				if state['isfunc'] and state['funcname'] == '_':
					translated.append((start, end))
				elif state['iskey'] or \
				(state['isfunc'] and state['funcname'] in ignore_functions) or \
				match_ignore(string):
					notsure.append((start, end))
				else:
					untranslated.append((start, end))
				state = reset()
			elif type == token.NAME:
				state = reset()
				state['funcname'] = string
			elif type == token.OP and string == '(':
				state['iskey'] = False
				state['isfunc'] = True
			elif type == token.OP and string == '[':
				state['isfunc'] = False
				state['iskey'] = True
			else:
				state = reset()

		return translated, untranslated, notsure 
Example #21
Source File: snippet.py    From Jandroid with BSD 3-Clause "New" or "Revised" License
def __init__(self, token_type, tokens):
    # For operators and delimiters, the TokenSnippet's type may be more specific
    # than the type of the constituent token. E.g. the TokenSnippet type is
    # token.DOT, but the token type is token.OP. This is because the parser
    # has more context than the tokenizer.
    self._type = token_type
    self._tokens = tokens
    self._modified = False 
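A quick illustration of that comment, independent of the snippet module: the tokenizer reports '.' generically as token.OP, while the grammar defines the more specific token.DOT (exposed by the tokenizer as exact_type):

import io
import token
import tokenize

dot = next(tok for tok in tokenize.generate_tokens(io.StringIO("a.b").readline)
           if tok.string == '.')
print(token.tok_name[dot.type])        # OP
print(token.tok_name[dot.exact_type])  # DOT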
Example #22
Source File: __init__.py    From pyta with GNU General Public License v3.0
def s_(string):
    return skip(some(lambda tok: tok.type == token.OP and tok.string == string)) 
Example #23
Source File: flake8_tuple.py    From flake8_tuple with BSD 3-Clause "New" or "Revised" License
def ending_of_bad_tuple(x):
    return x.type == token.OP and x.string == ',' 
Example #24
Source File: flake8_tuple.py    From flake8_tuple with BSD 3-Clause "New" or "Revised" License
def check_for_wrong_tuple(tree, code, noqa):
    errors = []
    candidates = []
    for assign in ast.walk(tree):
        if not isinstance(assign, (ast.Assign, ast.Return)):
            continue
        elif assign.lineno in noqa:
            continue
        elif isinstance(assign.value, ast.Call):
            continue
        for tuple_el in ast.walk(assign):
            if isinstance(tuple_el, ast.Tuple) and len(tuple_el.elts) == 1:
                candidates.append((assign.lineno, assign.col_offset))
                break
    if not candidates:
        return []
    for candidate in candidates:
        number_nl = 0  # account for logical newlines within statements
        tokens = tokenize.generate_tokens(
            lambda L=iter(code): next(L)
        )
        previous_token = None
        for t in tokens:
            if previous_token is not None and previous_token.type == tokenize.NEWLINE:
                number_nl = 0
            x = TokenInfo(*t)
            if x.start[0] - number_nl != candidate[0]:
                previous_token = x
                continue
            if x.type == tokenize.NL:
                number_nl += 1
            if x.type == token.NEWLINE and ending_of_bad_tuple(previous_token):
                errors.append(x.start)
            if x.type == token.OP and x.string == '=' and previous_token.type != token.NAME:
                x = TokenInfo(*next(tokens))
                if x.type != token.OP and x.string != '(':
                    x_next = TokenInfo(*next(tokens))
                    if ending_of_bad_tuple(x_next):
                        errors.append(x.start)
            previous_token = x
    return errors 
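This excerpt assumes import ast, import token, import tokenize, and from tokenize import TokenInfo at module level; code is the source split into lines and noqa is a collection of line numbers to skip. A hypothetical usage sketch that flags a one-element tuple created by a trailing comma:

import ast

src = "def f():\n    return 1,\n"
lines = src.splitlines(keepends=True)
errors = check_for_wrong_tuple(ast.parse(src), lines, noqa=[])
print(errors)  # [(2, 13)] -- the trailing comma makes 'return 1,' a one-tuple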
Example #25
Source File: autopep8.py    From python-netsurv with MIT License
def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


# A convenient way to handle tokens. 
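A few spot checks (hypothetical calls, assuming the function is in scope) showing how this classifies token text:

import tokenize

print(_is_binary_operator(tokenize.OP, '+'))      # True
print(_is_binary_operator(tokenize.OP, ','))      # False: ',' is a delimiter
print(_is_binary_operator(tokenize.NAME, 'and'))  # True: 'and'/'or' are special-cased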
Example #26
Source File: PyColorize.py    From Computable with MIT License
def __call__(self, toktype, toktext, start_pos, end_pos, line):
        """ Token handler, with syntax highlighting."""
        (srow,scol) = start_pos
        (erow,ecol) = end_pos
        colors = self.colors
        owrite = self.out.write

        # line separator, so this works across platforms
        linesep = os.linesep

        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)

        # send the original whitespace, if needed
        if newpos > oldpos:
            owrite(self.raw[oldpos:newpos])

        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            return

        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = colors.get(toktype, colors[_TEXT])

        #print '<%s>' % toktext,    # dbg

        # Triple quoted strings must be handled carefully so that backtracking
        # in pagers works correctly. We need color terminators on _each_ line.
        if linesep in toktext:
            toktext = toktext.replace(linesep, '%s%s%s' %
                                      (colors.normal,linesep,color))

        # send text
        owrite('%s%s%s' % (color,toktext,colors.normal)) 
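The range check token.LPAR <= toktype <= token.OP works because the token module numbers all operator and delimiter codes contiguously, from LPAR up to the generic OP (the exact values vary across Python versions). A quick way to list the group:

import token

group = [token.tok_name[val] for val in sorted(token.tok_name)
         if token.LPAR <= val <= token.OP]
print(group)  # ['LPAR', 'RPAR', 'LSQB', 'RSQB', ..., 'OP']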
Example #27
Source File: pt2html.py    From pyx with GNU General Public License v2.0
def tokeneater(self, toktype, toktext, xxx_todo_changeme, xxx_todo_changeme1, line):
        (srow, scol) = xxx_todo_changeme
        (erow, ecol) = xxx_todo_changeme1
        if toktype == token.ERRORTOKEN:
            raise RuntimeError("ErrorToken occurred")
        if toktype in [token.NEWLINE, tokenize.NL]:
            self.output.write('\n')
            self.col = 0
        else:
            # map token type to a color group
            if token.LPAR <= toktype and toktype <= token.OP:
                toktype = token.OP
            elif toktype == token.NAME and keyword.iskeyword(toktext):
                toktype = _KEYWORD

            # restore whitespace
            assert scol >= self.col
            self.output.write(" "*(scol-self.col))

            try:
                tokclass = tokclasses[toktype]
            except KeyError:
                tokclass = None
            if self.tokclass is not None and tokclass != self.tokclass:
                self.output.write('</span>')
            if tokclass is not None and tokclass != self.tokclass:
                self.output.write('<span class="%s">' % tokclass)
            self.output.write(cgi.escape(toktext))
            self.tokclass = tokclass

            # calculate new column position
            self.col = scol + len(toktext)
            newline = toktext.rfind("\n")
            if newline != -1:
                self.col = len(toktext) - newline - 1 
Example #28
Source File: autopep8.py    From PyDev.Debugger with Eclipse Public License 1.0
def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


# A convenient way to handle tokens.