Python token.tok_name Examples

The following are 27 code examples of token.tok_name, collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the token module.
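As a quick orientation before the examples: token.tok_name is a plain dictionary that maps the integer token codes produced by the tokenize module to their symbolic names, which is why every snippet below indexes it rather than calling it. A minimal, self-contained sketch (the sample source string is illustrative):

import token
import tokenize
from io import StringIO

# tok_name maps integer token codes to readable names, e.g. token.NAME -> 'NAME'.
for tok in tokenize.generate_tokens(StringIO("x = 1\n").readline):
    print(token.tok_name[tok.type], repr(tok.string))
# Prints: NAME 'x', OP '=', NUMBER '1', NEWLINE '\n', ENDMARKER ''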
Example #1
Source File: gen_rst.py    From deeppy with MIT License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #2
Source File: lexer.py    From heaviside with Apache License 2.0
def __init__(self, code, value, start=(0,0), stop=(0,0), line=''):
    """
    Args:
        code (string|int): Token code. Ints are translated using token.tok_name.
        value (string): Token value
        start (tuple): Pair of values describing token start line, start position
        stop (tuple): Pair of values describing token stop line, stop position
        line (string): String containing the line the token was parsed from
    """
    try:
        self.code = token.tok_name[code]
    except KeyError:  # code is already a name string, not an int
        self.code = code
    self.value = value
    self.start = start
    self.stop = stop
    self.line = line
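A hypothetical usage sketch of this constructor (assuming the enclosing Token class from heaviside's lexer.py is in scope; the values are illustrative):

import token

# Integer codes are translated to names; anything else is stored unchanged.
t = Token(token.NAME, 'foo', start=(1, 0), stop=(1, 3), line='foo = 1')
print(t.code)                   # 'NAME'
print(Token('WORD', 'x').code)  # 'WORD' -- not an int key in tok_name, kept as-is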
Example #3
Source File: gen_rst.py    From artview with BSD 3-Clause "New" or "Revised" License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = file(example_file).readlines()  # Python 2 only: file() builtin
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    tokens = tokenize.generate_tokens(lines.__iter__().next)  # Python 2: .next method
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and check_docstring):
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #4
Source File: gen_rst.py    From tensorlib with BSD 3-Clause "New" or "Revised" License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = open(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and check_docstring):
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #5
Source File: gen_rst.py    From polylearn with BSD 2-Clause "Simplified" License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #6
Source File: gen_rst.py    From tick with BSD 3-Clause "New" or "Revised" License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #7
Source File: gen_rst.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = open(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and check_docstring):
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Example #8
Source File: ast_tools.py    From Computable with MIT License
def translate_symbols(ast_tuple):
    """ Translate numeric grammar symbols in an ast_tuple descriptive names.

        This simply traverses the tree converting any integer value to values
        found in symbol.sym_name or token.tok_name.
    """
    new_list = []
    for item in ast_tuple:
        if isinstance(item, int):
            new_list.append(int_to_symbol(item))
        elif issequence(item):
            new_list.append(translate_symbols(item))
        else:
            new_list.append(item)
    if isinstance(ast_tuple, tuple):
        return tuple(new_list)
    else:
        return new_list 
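A usage sketch for translate_symbols, assuming the companion helpers issequence and int_to_symbol from the same ast_tools module are in scope, and Python 3.9 or earlier (the parser and symbol modules were removed in Python 3.10):

import parser  # deprecated; removed in Python 3.10

# parser.expr returns a syntax tree whose nodes are numeric codes;
# translate_symbols replaces each code with its sym_name/tok_name label.
st = parser.expr('1 + 2').totuple()
print(translate_symbols(st))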
Example #9
Source File: pygettext.py    From android_universal with MIT License
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch
##        import token
##        print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
##              file=sys.stderr)
    self.__state(ttype, tstring, stup[0])
Example #10
Source File: tree.py    From coarse2fine with MIT License
def set_by_str(self, f):
    tk_list = list(
        tokenize(BytesIO(f.strip().encode('utf-8')).readline))[1:-1]
    self.token_list = [tk.string for tk in tk_list]
    self.type_list = [token.tok_name[tk.type] for tk in tk_list]

# well-tokenized token list
Example #11
Source File: pygettext.py    From odoo13-x64 with GNU General Public License v3.0
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch
##        import token
##        print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
##              file=sys.stderr)
    self.__state(ttype, tstring, stup[0])
Example #12
Source File: color_generator.py    From msmexplorer with MIT License
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(lines.__iter__().__next__)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs,
            # extract the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')
                                   ).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break

    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Example #13
Source File: test_tokenize.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def assertExactTypeEqual(self, opstr, *optypes):
    tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
    num_optypes = len(optypes)
    self.assertEqual(len(tokens), 2 + num_optypes)
    self.assertEqual(token.tok_name[tokens[0].exact_type],
                     token.tok_name[ENCODING])
    for i in range(num_optypes):
        self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                         token.tok_name[optypes[i]])
    self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                     token.tok_name[token.ENDMARKER])
Example #14
Source File: test_tokenize.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def check_tokenize(self, s, expected):
    # Format the tokens in s in a table format.
    # The ENDMARKER is omitted.
    result = []
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
                      locals())
    self.assertEqual(result,
                     ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                     expected.rstrip().splitlines())
Example #15
Source File: pygettext.py    From HRTunerProxy with GNU General Public License v2.0
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
    self.__state(ttype, tstring, stup[0])
Example #16
Source File: plot_generator.py    From msmexplorer with MIT License
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(lines.__iter__().__next__)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs,
            # extract the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')
                                   ).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break

    thumbloc = None
    for i, line in enumerate(docstring.split("\n")):
        m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
        if m:
            thumbloc = float(m.group(1)), float(m.group(2))
            break
    if thumbloc is not None:
        self.thumbloc = thumbloc
        docstring = "\n".join([l for l in docstring.split("\n")
                               if not l.startswith("_thumb")])

    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Example #17
Source File: gen_rst.py    From tensorlib with BSD 3-Clause "New" or "Revised" License
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row 
Example #18
Source File: pygettext.py    From datafari with Apache License 2.0
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
    self.__state(ttype, tstring, stup[0])
Example #19
Source File: ast_tools.py    From Computable with MIT License
def int_to_symbol(i):
    """ Convert numeric symbol or token to a desriptive name.
    """
    try:
        return symbol.sym_name[i]
    except KeyError:
        return token.tok_name[i] 
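For illustration, a couple of lookups through int_to_symbol (again assuming Python 3.9 or earlier, since the symbol module was removed in Python 3.10):

import symbol
import token

print(int_to_symbol(symbol.atom))  # 'atom'  -- found in symbol.sym_name
print(int_to_symbol(token.NAME))   # 'NAME'  -- KeyError falls through to token.tok_name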
Example #20
Source File: gen_rst.py    From artview with BSD 3-Clause "New" or "Revised" License
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = file(filename).readlines()  # Python 2 only: file() builtin
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(iter(lines).next)  # Python 2: .next method
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row 
Example #21
Source File: offset_token.py    From Jandroid with BSD 3-Clause "New" or "Revised" License
def type_name(self):
    return token.tok_name[self._type] 
Example #22
Source File: pygettext.py    From oss-ftp with MIT License
def __call__(self, ttype, tstring, stup, etup, line):
    # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
    self.__state(ttype, tstring, stup[0])
Example #23
Source File: gen_rst.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row 
Example #24
Source File: snippet.py    From Jandroid with BSD 3-Clause "New" or "Revised" License
def type_name(self):
    return token.tok_name[self.type] 
Example #25
Source File: gallery_generator.py    From arviz with Apache License 2.0
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith("#!"):
        lines.pop(0)
        start_row = 1

    docstring = ""
    first_par = ""
    line_iter = lines.__iter__()
    tokens = tokenize.generate_tokens(lambda: next(line_iter))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ("NEWLINE", "COMMENT", "NL", "INDENT", "DEDENT"):
            continue
        elif tok_type == "STRING":
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs,
            # extract the first one:
            paragraphs = "\n".join(line.rstrip() for line in docstring.split("\n")).split(
                "\n\n"
            )
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break

    thumbloc = None
    for i, line in enumerate(docstring.split("\n")):
        m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
        if m:
            thumbloc = float(m.group(1)), float(m.group(2))
            break
    if thumbloc is not None:
        self.thumbloc = thumbloc
        docstring = "\n".join([l for l in docstring.split("\n") if not l.startswith("_thumb")])

    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Example #26
Source File: plot_generator.py    From mpl-probscale with BSD 3-Clause "New" or "Revised" License
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(lines.__iter__().next)  # Python 2: .next method
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs,
            # extract the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')
                                   ).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break

    thumbloc = None
    for i, line in enumerate(docstring.split("\n")):
        m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
        if m:
            thumbloc = float(m.group(1)), float(m.group(2))
            break
    if thumbloc is not None:
        self.thumbloc = thumbloc
        docstring = "\n".join([l for l in docstring.split("\n")
                               if not l.startswith("_thumb")])

    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Example #27
Source File: test_tokenize.py    From Fluid-Designer with GNU General Public License v3.0
def check_tokenize(self, s, expected):
    # Format the tokens in s in a table format.
    # The ENDMARKER is omitted.
    result = []
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
                      locals())
    self.assertEqual(result,
                     ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                     expected.rstrip().splitlines())