Python tokenize.NAME Examples

The following are 30 code examples of tokenize.NAME(), drawn from open source projects; the source file and originating project are noted above each example. You may also want to check out all available functions/classes of the module tokenize, or try the search function.
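tokenize.NAME is the token type that the tokenize module assigns to identifiers; keywords also arrive as NAME tokens and can be told apart with keyword.iskeyword(). As a minimal standalone sketch (the source string is purely illustrative), the following prints every NAME token in a snippet:

import io
import tokenize

source = "answer = compute(x, y)\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.NAME:
        print(tok.string)  # prints: answer, compute, x, y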
Example #1
Source File: cgitb.py    From ironpython3 with Apache License 2.0
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
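cgitb.text() and cgitb.html() drive scanvars() with a readline-style callable positioned at the line that raised the exception. A rough sketch of that wiring, calling the stdlib cgitb.scanvars directly (cgitb ships with Python up to 3.12; run this from a file so linecache can find the source):

import linecache
import sys
from cgitb import scanvars

def demo():
    spam = 42
    try:
        return spam / 0
    except ZeroDivisionError:
        tb = sys.exc_info()[2]
        frame, lnum = tb.tb_frame, tb.tb_lineno
        filename = frame.f_code.co_filename

        def reader(lnum=[lnum]):
            # Hand scanvars one source line at a time, as cgitb.text() does.
            try:
                return linecache.getline(filename, lnum[0])
            finally:
                lnum[0] += 1

        return scanvars(reader, frame, frame.f_locals)

print(demo())  # roughly [('spam', 'local', 42)]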
Example #2
Source File: analyze.py    From pyminifier with GNU General Public License v3.0
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index+1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index+1][1] == '=': # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args 
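A rough sketch of driving this helper, assuming enumerate_keyword_args from the example above is in scope (the source string is only illustrative):

import io
import tokenize

source = "def greet(name, greeting='hi'):\n    return greeting + ' ' + name\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(enumerate_keyword_args(tokens))  # -> {'greet': ['greeting']}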
Example #3
Source File: format.py    From python-netsurv with MIT License
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets("("):
            return False
        # token_info
        # type string start end line
        #  0      1     2    3    4
        bracket_level = 0
        for token in tokens[i - 1 :: -1]:
            if token[1] == ":":
                return True
            if token[1] == "(":
                return False
            if token[1] == "]":
                bracket_level += 1
            elif token[1] == "[":
                bracket_level -= 1
            elif token[1] == ",":
                if not bracket_level:
                    return False
            elif token[1] in (".", "..."):
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
                return False
        return False 
Example #4
Source File: format.py    From linter-pylama with MIT License
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets('('):
            return False
        bracket_level = 0
        for token in tokens[i-1::-1]:
            if token[1] == ':':
                return True
            if token[1] == '(':
                return False
            if token[1] == ']':
                bracket_level += 1
            elif token[1] == '[':
                bracket_level -= 1
            elif token[1] == ',':
                if not bracket_level:
                    return False
            elif token[1] == '.':
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING):
                return False
        return False 
Example #5
Source File: expr.py    From recruit with Apache License 2.0
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
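pandas applies this transformer to every (type, string) pair of a tokenized expression and then rebuilds the source with tokenize.untokenize(). A rough standalone sketch, assuming _replace_booleans above is in scope:

import io
import tokenize

expr = "(a > 1) & (b < 2) | c\n"
new_tokens = [
    _replace_booleans((tok.type, tok.string))
    for tok in tokenize.generate_tokens(io.StringIO(expr).readline)
]
# untokenize() may alter spacing, but '&' and '|' come back as 'and' and 'or'
print(tokenize.untokenize(new_tokens))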
Example #6
Source File: format.py    From python-netsurv with MIT License
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets("("):
            return False
        # token_info
        # type string start end line
        #  0      1     2    3    4
        bracket_level = 0
        for token in tokens[i - 1 :: -1]:
            if token[1] == ":":
                return True
            if token[1] == "(":
                return False
            if token[1] == "]":
                bracket_level += 1
            elif token[1] == "[":
                bracket_level -= 1
            elif token[1] == ",":
                if not bracket_level:
                    return False
            elif token[1] in (".", "..."):
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
                return False
        return False 
Example #7
Source File: cgitb.py    From meddle with MIT License
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #8
Source File: cgitb.py    From ironpython2 with Apache License 2.0
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #9
Source File: AutoIndent.py    From ironpython2 with Apache License 2.0
def run(self):
        OPENERS=('class', 'def', 'for', 'if', 'try', 'while')
        INDENT=tokenize.INDENT
        NAME=tokenize.NAME
                   
        save_tabsize = tokenize.tabsize
        tokenize.tabsize = self.tabwidth
        try:
            try:
                for (typ, token, start, end, line) in token_generator(self.readline):
                    if typ == NAME and token in OPENERS:
                        self.blkopenline = line
                    elif typ == INDENT and self.blkopenline:
                        self.indentedline = line
                        break

            except (tokenize.TokenError, IndentationError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline 
Example #10
Source File: expr.py    From vnpy_crypto with MIT License
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
Example #11
Source File: cgitb.py    From BinderFilter with MIT License
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #12
Source File: cgitb.py    From Computable with MIT License
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #13
Source File: pygettext.py    From oss-ftp with MIT License
def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen 
Example #14
Source File: eval.py    From vnpy_crypto with MIT License
def add_token(self, token_type, token):
        if self.done:
            return
        self.tokens.append((token_type, token))
        if token in ["(", "{", "["]:
            self.paren_depth += 1
        if token in [")", "}", "]"]:
            self.paren_depth -= 1
        assert self.paren_depth >= 0
        if not self.started:
            if token == "(":
                self.started = True
            else:
                assert token_type == tokenize.NAME or token == "."
                self.func.append(token)
        if self.started and self.paren_depth == 0:
            self.done = True

# This is not a very general function -- it assumes that all references to the
# given object are of the form '<obj_name>.something(method call)'. 
Example #15
Source File: cgitb.py    From Imogen with MIT License
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #16
Source File: cgitb.py    From Fluid-Designer with GNU General Public License v3.0
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #17
Source File: expr.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
Example #18
Source File: obfuscate.py    From shellsploit-library with MIT License
def obfuscate_unique(tokens, index, replace, replacement, *args):
    """
    If the token string (a unique value anywhere) inside *tokens[index]*
    matches *replace*, return *replacement*.

    .. note::

        This function is only for replacing absolutely unique occurrences of
        *replace* (where we don't have to worry about their position).
    """
    def return_replacement(replacement):
        UNIQUE_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string == replace:
        return return_replacement(replacement) 
Example #19
Source File: obfuscate.py    From shellsploit-library with MIT License
def obfuscate_class(tokens, index, replace, replacement, *args):
    """
    If the token string (a class) inside *tokens[index]* matches *replace*,
    return *replacement*.
    """
    def return_replacement(replacement):
        CLASS_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    prev_tok = tokens[index - 1]
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):
        return None
    if prev_tok_string != '.':
        if token_string == replace:
            return return_replacement(replacement) 
Example #20
Source File: obfuscate.py    From shellsploit-library with MIT License
def obfuscatable_function(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a function or method name that can be
    safely obfuscated.
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if index > 0:
        prev_tok = tokens[index - 1]
    else:  # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):  # Don't mess with specials
        return None
    if prev_tok_string == "def":
        return token_string 
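A rough sketch of scanning a token list with this predicate, assuming obfuscatable_function above is in scope (the source string is illustrative):

import io
import tokenize

source = "def secret_helper(data):\n    return data\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
candidates = [obfuscatable_function(tokens, i) for i in range(len(tokens))]
print([name for name in candidates if name])  # -> ['secret_helper']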
Example #21
Source File: obfuscate.py    From shellsploit-library with MIT License
def obfuscatable_class(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a class name that can be safely
    obfuscated.
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if index > 0:
        prev_tok = tokens[index - 1]
    else:  # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):  # Don't mess with specials
        return None
    if prev_tok_string == "class":
        return token_string 
Example #22
Source File: analyze.py    From shellsploit-library with MIT License
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index + 1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index + 1][1] == '=':  # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args 
Example #23
Source File: cgitb.py    From oss-ftp with MIT License
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Example #24
Source File: test_inputtransformer.py    From Computable with MIT License
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    result = []
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
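This mirrors the example in the tokenize module documentation. A rough sketch of using it end to end, assuming decistmt above is in scope:

import io
import tokenize
from decimal import Decimal  # the rewritten source refers to Decimal

source = "print(+21.3e-5 * -.1234 / 81.7)\n"
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
rewritten = tokenize.untokenize(decistmt(tokens))
exec(rewritten)  # prints a Decimal result instead of a float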
Example #25
Source File: analyze.py    From shellsploit-library with MIT License
def enumerate_dynamic_imports(tokens):
    """
    Returns a list of all dynamically imported modules (those imported inside
    of classes or functions).

    Example:
        >>> enumerate_dynamic_imports(tokens)
        ['zlib', 'base64']
    """
    imported_modules = []
    import_line = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
        elif token_string == "import":
            try:
                if tokens[index - 1][0] == tokenize.NEWLINE:
                    import_line = True
            except IndexError:
                import_line = True  # Just means this is the first line
        elif import_line:
            if token_type == tokenize.NAME and tokens[index + 1][1] != 'as':
                if token_string not in reserved_words:
                    if token_string not in imported_modules:
                        imported_modules.append(token_string)
    return imported_modules 
Example #26
Source File: parser.py    From linter-pylama with MIT License
def _parse_from_import_names(self, is_future_import):
        """Parse the 'y' part in a 'from x import y' statement."""
        if self.current.value == '(':
            self.consume(tk.OP)
            expected_end_kinds = (tk.OP, )
        else:
            expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)
        while self.current.kind not in expected_end_kinds and not (
                    self.current.kind == tk.OP and self.current.value == ';'):
            if self.current.kind != tk.NAME:
                self.stream.move()
                continue
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value)
            if is_future_import:
                self.log.debug('found future import: %s', self.current.value)
                self.future_imports.add(self.current.value)
            self.consume(tk.NAME)
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value)
            if self.current.kind == tk.NAME and self.current.value == 'as':
                self.consume(tk.NAME)  # as
                if self.current.kind == tk.NAME:
                    self.consume(tk.NAME)  # new name, irrelevant
            if self.current.value == ',':
                self.consume(tk.OP)
            self.log.debug("parsing import, token is %r (%s)",
                           self.current.kind, self.current.value) 
Example #27
Source File: obfuscate.py    From shellsploit-library with MIT License
def obfuscate_function(tokens, index, replace, replacement, *args):
    """
    If the token string (a function) inside *tokens[index]* matches *replace*,
    return *replacement*.
    """
    def return_replacement(replacement):
        FUNC_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    prev_tok = tokens[index - 1]
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):
        return None
    if token_string == replace:
        if prev_tok_string != '.':
            if token_string == replace:
                return return_replacement(replacement)
        else:
            parent_name = tokens[index - 2][1]
            if parent_name in CLASS_REPLACEMENTS:
                # This should work for @classmethod methods
                return return_replacement(replacement)
            elif parent_name in VAR_REPLACEMENTS:
                # This covers regular ol' instance methods
                return return_replacement(replacement) 
Example #28
Source File: pycodestyle.py    From linter-pylama with MIT License
def whitespace_before_parameters(logical_line, tokens):
    r"""Avoid extraneous whitespace.

    Avoid extraneous whitespace in the following situations:
    - before the open parenthesis that starts the argument list of a
      function call.
    - before the open parenthesis that starts an indexing or slicing.

    Okay: spam(1)
    E211: spam (1)

    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index in range(1, len(tokens)):
        token_type, text, start, end, __ = tokens[index]
        if (token_type == tokenize.OP and
            text in '([' and
            start != prev_end and
            (prev_type == tokenize.NAME or prev_text in '}])') and
            # Syntax "class A (B):" is allowed, but avoid it
            (index < 2 or tokens[index - 2][1] != 'class') and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type = token_type
        prev_text = text
        prev_end = end 
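pycodestyle normally wires this check up through its Checker machinery; a rough way to exercise it on a single logical line, assuming the function above is in scope:

import io
import keyword   # used inside whitespace_before_parameters
import tokenize

logical_line = "spam (1)\n"
tokens = list(tokenize.generate_tokens(io.StringIO(logical_line).readline))
for offset, message in whitespace_before_parameters(logical_line, tokens):
    print(offset, message)  # -> (1, 4) E211 whitespace before '('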
Example #29
Source File: analyze.py    From shellsploit-library with MIT License
def enumerate_imports(tokens):
    """
    Iterates over *tokens* and returns a list of all imported modules.

    .. note:: This ignores imports using the 'as' and 'from' keywords.
    """
    imported_modules = []
    import_line = False
    from_import = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
            from_import = False
        elif token_string == "import":
            import_line = True
        elif token_string == "from":
            from_import = True
        elif import_line:
            if token_type == tokenize.NAME and tokens[index + 1][1] != 'as':
                if not from_import:
                    if token_string not in reserved_words:
                        if token_string not in imported_modules:
                            imported_modules.append(token_string)
    return imported_modules 
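enumerate_imports reads a module-level reserved_words list defined elsewhere in analyze.py; the sketch below substitutes a hypothetical stand-in for it and assumes the function above is in scope:

import io
import keyword
import tokenize

reserved_words = keyword.kwlist  # stand-in for analyze.py's module-level list

source = "import zlib\nimport base64\nfrom os import path\n"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(enumerate_imports(tokens))  # -> ['zlib', 'base64']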
Example #30
Source File: htmlizer.py    From Safejumper-for-Desktop with GNU General Public License v2.0
def printtoken(self, type, token, sCoordinates, eCoordinates, line):
        if hasattr(tokenize, "ENCODING") and type == tokenize.ENCODING:
            self.encoding = token
            return

        (srow, scol) = sCoordinates
        (erow, ecol) = eCoordinates
        if self.currentLine < srow:
            self.writer('\n'*(srow-self.currentLine))
            self.currentLine, self.currentCol = srow, 0
        self.writer(' '*(scol-self.currentCol))
        if self.lastIdentifier:
            type = "identifier"
            self.parameters = 1
        elif type == tokenize.NAME:
            if keyword.iskeyword(token):
                type = 'keyword'
            else:
                if self.parameters:
                    type = 'parameter'
                else:
                    type = 'variable'
        else:
            type = tokenize.tok_name.get(type).lower()
        self.writer(token, type)
        self.currentCol = ecol
        self.currentLine += token.count('\n')
        if self.currentLine != erow:
            self.currentCol = 0
        self.lastIdentifier = token in ('def', 'class')
        if token == ':':
            self.parameters = 0