Python tokenize.DEDENT Examples
The following are 30
code examples of tokenize.DEDENT (an integer token-type constant, not a callable).
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
tokenize,
or try the search function.
Example #1
Source File: autopep8.py From python-netsurv with MIT License | 6 votes |
def _find_logical(source_lines):
    """Return the start and end positions of every logical line.

    Tokenizes *source_lines* (a sequence of physical lines) and builds
    two parallel lists of ``(row, column)`` pairs — rows zero-based —
    marking where each logical line begins and where it ends.
    """
    logical_start = []
    logical_end = []
    at_line_start = True   # the next significant token opens a logical line
    paren_depth = 0        # > 0 while inside (), [] or {}
    for tok in generate_tokens(''.join(source_lines)):
        tok_type = tok[0]
        # Pseudo tokens that carry no logical-line information.
        if tok_type in (tokenize.COMMENT, tokenize.DEDENT, tokenize.INDENT,
                        tokenize.NL, tokenize.ENDMARKER):
            continue
        # A NEWLINE (or ';') outside any bracket terminates a logical line.
        if not paren_depth and tok_type in (tokenize.NEWLINE, tokenize.SEMI):
            at_line_start = True
            logical_end.append((tok[3][0] - 1, tok[2][1]))
            continue
        if at_line_start and not paren_depth:
            logical_start.append((tok[2][0] - 1, tok[2][1]))
            at_line_start = False
        # Track bracket nesting so newlines inside brackets are ignored.
        if tok_type == tokenize.OP:
            if tok[1] in '([{':
                paren_depth += 1
            elif tok[1] in '}])':
                paren_depth -= 1
    return (logical_start, logical_end)
Example #2
Source File: autopep8.py From python-netsurv with MIT License | 6 votes |
def _find_logical(source_lines):
    """Return the start and end positions of every logical line.

    Tokenizes *source_lines* (a sequence of physical lines) and builds
    two parallel lists of ``(row, column)`` pairs — rows zero-based —
    marking where each logical line begins and where it ends.
    """
    logical_start = []
    logical_end = []
    at_line_start = True   # the next significant token opens a logical line
    paren_depth = 0        # > 0 while inside (), [] or {}
    for tok in generate_tokens(''.join(source_lines)):
        tok_type = tok[0]
        # Pseudo tokens that carry no logical-line information.
        if tok_type in (tokenize.COMMENT, tokenize.DEDENT, tokenize.INDENT,
                        tokenize.NL, tokenize.ENDMARKER):
            continue
        # A NEWLINE (or ';') outside any bracket terminates a logical line.
        if not paren_depth and tok_type in (tokenize.NEWLINE, tokenize.SEMI):
            at_line_start = True
            logical_end.append((tok[3][0] - 1, tok[2][1]))
            continue
        if at_line_start and not paren_depth:
            logical_start.append((tok[2][0] - 1, tok[2][1]))
            at_line_start = False
        # Track bracket nesting so newlines inside brackets are ignored.
        if tok_type == tokenize.OP:
            if tok[1] in '([{':
                paren_depth += 1
            elif tok[1] in '}])':
                paren_depth -= 1
    return (logical_start, logical_end)
Example #3
Source File: autopep8.py From filmkodi with Apache License 2.0 | 6 votes |
def _find_logical(source_lines):
    """Return the start and end positions of every logical line.

    Tokenizes *source_lines* (a sequence of physical lines) and builds
    two parallel lists of ``(row, column)`` pairs — rows zero-based —
    marking where each logical line begins and where it ends.
    """
    logical_start = []
    logical_end = []
    at_line_start = True   # the next significant token opens a logical line
    paren_depth = 0        # > 0 while inside (), [] or {}
    for tok in generate_tokens(''.join(source_lines)):
        tok_type = tok[0]
        # Pseudo tokens that carry no logical-line information.
        if tok_type in (tokenize.COMMENT, tokenize.DEDENT, tokenize.INDENT,
                        tokenize.NL, tokenize.ENDMARKER):
            continue
        # A NEWLINE (or ';') outside any bracket terminates a logical line.
        if not paren_depth and tok_type in (tokenize.NEWLINE, tokenize.SEMI):
            at_line_start = True
            logical_end.append((tok[3][0] - 1, tok[2][1]))
            continue
        if at_line_start and not paren_depth:
            logical_start.append((tok[2][0] - 1, tok[2][1]))
            at_line_start = False
        # Track bracket nesting so newlines inside brackets are ignored.
        if tok_type == tokenize.OP:
            if tok[1] in '([{':
                paren_depth += 1
            elif tok[1] in '}])':
                paren_depth -= 1
    return (logical_start, logical_end)
Example #4
Source File: pythondoc.py From InternationalizationScript-iOS with MIT License | 6 votes |
def handle_token(self, *args):
    # Dispatch one incoming tokenize token to the current state handler.
    #
    # ``args`` is the 5-tuple produced by the tokenize module:
    # (token type, token string, start, end, line).  The current handler
    # (stored in ``self.handler``) is invoked with those arguments and
    # returns the handler to use for the next token (state-machine style).
    # NOTE: Python 2 code — print statements, im_func and apply().
    if DEBUG > 1:
        print self.handler.im_func.func_name, self.indent,
        print tokenize.tok_name[args[0]], repr(args[1])
    if args[0] == tokenize.DEDENT:
        # Leaving a block: drop one indentation level and pop every scope
        # (and its matching stack entry) opened at or below this level.
        self.indent = self.indent - 1
        while self.scope and self.scope[-1][0] >= self.indent:
            del self.scope[-1]
            del self.stack[-1]
    # apply() is the Python 2 spelling of self.handler(*args).
    self.handler = apply(self.handler, args)
    if args[0] == tokenize.INDENT:
        self.indent = self.indent + 1

##
# (Token handler) Scans for encoding directive.
Example #5
Source File: pythondoc.py From InternationalizationScript-iOS with MIT License | 6 votes |
def handle_token(self, *args):
    # Dispatch one incoming tokenize token to the current state handler.
    #
    # ``args`` is the 5-tuple produced by the tokenize module:
    # (token type, token string, start, end, line).  The current handler
    # (stored in ``self.handler``) is invoked with those arguments and
    # returns the handler to use for the next token (state-machine style).
    # NOTE: Python 2 code — print statements, im_func and apply().
    if DEBUG > 1:
        print self.handler.im_func.func_name, self.indent,
        print tokenize.tok_name[args[0]], repr(args[1])
    if args[0] == tokenize.DEDENT:
        # Leaving a block: drop one indentation level and pop every scope
        # (and its matching stack entry) opened at or below this level.
        self.indent = self.indent - 1
        while self.scope and self.scope[-1][0] >= self.indent:
            del self.scope[-1]
            del self.stack[-1]
    # apply() is the Python 2 spelling of self.handler(*args).
    self.handler = apply(self.handler, args)
    if args[0] == tokenize.INDENT:
        self.indent = self.indent + 1

##
# (Token handler) Scans for encoding directive.
Example #6
Source File: reader.py From pycodesuggest with MIT License | 6 votes |
def read_data(path, listfile):
    """Yield one preprocessed token list per Python file in *listfile*.

    *listfile* is either a list of file names or the path of a text file
    with one name per line; each name is joined onto *path*.  Files that
    cannot be opened or tokenized are silently skipped.
    """
    if isinstance(listfile, list):
        names = listfile
    else:
        with open(listfile) as handle:
            names = handle.read().splitlines()
    python_files = [os.path.join(path, name) for name in names]
    for filename in python_files:
        try:
            with open(filename) as handle:
                all_tokens = list(tokenize.generate_tokens(handle.readline))
                # Keep DEDENTs (their string is empty) but drop comments,
                # triple-quoted strings and other empty-string tokens.
                yield [preprocess(tok_type, tok_val)
                       for tok_type, tok_val, _, _, _ in all_tokens
                       if tok_type != tokenize.COMMENT
                       and not tok_val.startswith("'''")
                       and not tok_val.startswith('"""')
                       and (tok_type == tokenize.DEDENT or tok_val != "")]
        except:
            # Deliberate best effort: skip unreadable/unparsable files.
            pass
Example #7
Source File: autopep8.py From PyDev.Debugger with Eclipse Public License 1.0 | 6 votes |
def _find_logical(source_lines):
    """Return the start and end positions of every logical line.

    Tokenizes *source_lines* (a sequence of physical lines) and builds
    two parallel lists of ``(row, column)`` pairs — rows zero-based —
    marking where each logical line begins and where it ends.
    """
    logical_start = []
    logical_end = []
    at_line_start = True   # the next significant token opens a logical line
    paren_depth = 0        # > 0 while inside (), [] or {}
    for tok in generate_tokens(''.join(source_lines)):
        tok_type = tok[0]
        # Pseudo tokens that carry no logical-line information.
        if tok_type in (tokenize.COMMENT, tokenize.DEDENT, tokenize.INDENT,
                        tokenize.NL, tokenize.ENDMARKER):
            continue
        # A NEWLINE (or ';') outside any bracket terminates a logical line.
        if not paren_depth and tok_type in (tokenize.NEWLINE, tokenize.SEMI):
            at_line_start = True
            logical_end.append((tok[3][0] - 1, tok[2][1]))
            continue
        if at_line_start and not paren_depth:
            logical_start.append((tok[2][0] - 1, tok[2][1]))
            at_line_start = False
        # Track bracket nesting so newlines inside brackets are ignored.
        if tok_type == tokenize.OP:
            if tok[1] in '([{':
                paren_depth += 1
            elif tok[1] in '}])':
                paren_depth -= 1
    return (logical_start, logical_end)
Example #8
Source File: evaluate.py From python_autocomplete with MIT License | 6 votes |
def __get_tokens(it):
    """Collect the significant tokens from the token iterator *it*.

    Tokens in ``tokenizer.SKIP_TOKENS``, empty-string NEWLINEs, DEDENTs
    and ERRORTOKENs are discarded.  A tokenization error other than a
    plain EOF error is printed and truncates the result.
    """
    tokens: List[tokenize.TokenInfo] = []
    try:
        for tok in it:
            skip = (
                tok.type in tokenizer.SKIP_TOKENS
                or (tok.type == tokenize.NEWLINE and tok.string == '')
                or tok.type == tokenize.DEDENT
                or tok.type == tokenize.ERRORTOKEN
            )
            if not skip:
                tokens.append(tok)
    except tokenize.TokenError as err:
        # "EOF in multi-line ..." is expected for partial input; anything
        # else is worth reporting.
        if not err.args[0].startswith('EOF in'):
            print(err)
    except IndentationError as err:
        print(err)
    return tokens
Example #9
Source File: conditions.py From wemake-python-styleguide with MIT License | 6 votes |
def _does_else_belong_to_if(self, start_index: int) -> bool:
    """Tell whether the ``else`` token at *start_index* closes an ``if``.

    An ``else`` that opens its own line is preceded by a DEDENT token;
    we then scan backwards for a name token in the same column, which is
    the parent statement this ``else`` belongs to.
    """
    preceding = self.file_tokens[start_index - 1]
    if preceding.type != tokenize.DEDENT:
        # Not the first token on its line, so it can also be the
        # "embedded" else of a ternary:  x if A else B
        return False
    # Python requires else to sit at the same indentation as its parent
    # statement, so the parent is the nearest NAME in the same column.
    for candidate in reversed(self.file_tokens[:start_index - 1]):
        if candidate.type != tokenize.NAME:
            continue
        if candidate.start[1] == preceding.start[1]:
            return candidate.string in {'if', 'elif'}
    return False
Example #10
Source File: minify.py From mitogen with BSD 3-Clause "New" or "Revised" License | 6 votes |
def reindent(tokens, indent=' '):
    """Replace the existing indentation in a token stream with *indent*.

    Yields the tokens of *tokens* with every INDENT token's text rebuilt
    as ``indent`` repeated once per nesting level, and the start (and,
    for single-line tokens, end) columns shifted accordingly.
    """
    prior_widths = []   # stack of old INDENT-string widths, one per level
    width = 0           # width of the current (old) indentation string
    depth = 0           # current nesting depth in the rewritten stream
    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
        if typ == tokenize.INDENT:
            prior_widths.append(width)
            width = len(tok)
            depth += 1
            tok = indent * depth
        elif typ == tokenize.DEDENT:
            width = prior_widths.pop()
            depth -= 1
        # Shift every token left by the difference between the old and
        # the new indentation, clamping at column 0.
        start_col = max(0, start_col - width + depth)
        if start_row == end_row:
            end_col = start_col + len(tok)
        yield typ, tok, (start_row, start_col), (end_row, end_col), line
Example #11
Source File: pyreader.py From pycodesuggest with MIT License | 5 votes |
def preprocess(tokentype, tokenval):
    """Map a raw token to its vocabulary representation.

    NUMBER, INDENT and DEDENT tokens are collapsed onto the module-level
    placeholder symbols; any other token passes through verbatim.
    """
    if tokentype == tokenize.NUMBER:
        return number_token
    if tokentype == tokenize.INDENT:
        return indent_token
    if tokentype == tokenize.DEDENT:
        return dedent_token
    return tokenval
Example #12
Source File: inspect.py From CTFCrackTools-V2 with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #13
Source File: pyreader.py From pycodesuggest with MIT License | 5 votes |
def read_data(path, listfile, word_to_id=None, gen_def_positions=True):
    # Read and tokenize every Python file named by *listfile* (either a
    # list of names or the path of a text file with one name per line),
    # each joined onto *path*.
    #
    # Returns a 3-tuple:
    #   data                 - per file: list of (mapped token, start pos)
    #   definition_positions - per file: walker.definition_positions
    #                          (only filled when gen_def_positions is true)
    #   identifier_usage     - per file: walker.name_usage
    if isinstance(listfile, list):
        python_files = [os.path.join(path, f) for f in listfile]
    else:
        with open(listfile) as f:
            python_files = [os.path.join(path, x) for x in f.read().splitlines()]
    # Map token strings to vocabulary ids; unknown words fall back to oov_id.
    mapping = (lambda x: x) if word_to_id is None else (lambda x: word_to_id.get(x, oov_id))
    data = []
    definition_positions = []
    identifier_usage = []
    for filename in python_files:
        try:
            source, tree = get_source_tree(filename)
            tokens = tokenize.generate_tokens(StringIO(source).readline)
            # Keep DEDENT tokens (their string is empty) but drop comments,
            # triple-quoted strings and other empty-string tokens.
            data.append([(mapping(preprocess(tokenType, tokenVal)), start)
                         for tokenType, tokenVal, start, _, _ in tokens
                         if tokenType != tokenize.COMMENT
                         and not tokenVal.startswith("'''")
                         and not tokenVal.startswith('"""')
                         and (tokenType == tokenize.DEDENT or tokenVal != "")])
            if gen_def_positions:
                walker = astwalker.ASTWalker()
                walker.walk(tree)
                definition_positions.append(walker.definition_positions)
                identifier_usage.append(walker.name_usage)
        except:
            # Best effort: report and skip files that fail to parse.
            print("Error when tokenizing %s: %s" % (filename, sys.exc_info()[0]))
    return data, definition_positions, identifier_usage
Example #14
Source File: reader.py From pycodesuggest with MIT License | 5 votes |
def preprocess(tokentype, tokenval):
    """Normalise one token for the ngram pipeline.

    Numbers and indentation tokens become fixed placeholder strings.
    Every other token keeps its text, but spaces and newlines are
    replaced with stand-in markers because the downstream ngram
    processor splits its input on spaces.
    """
    if tokentype == tokenize.NUMBER:
        return number_token
    if tokentype == tokenize.INDENT:
        return "<indent>"
    if tokentype == tokenize.DEDENT:
        return "<dedent>"
    return tokenval.replace(" ", "ยง").replace("\n", "<newline>")
Example #15
Source File: inspect.py From canape with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #16
Source File: reindent.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def tokeneater(self, type, token, (sline, scol), end, line, INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT, NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT, NL=tokenize.NL):
Example #17
Source File: inspect.py From CTFCrackTools-V2 with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #18
Source File: parser.py From linter-pylama with MIT License | 5 votes |
def parse_definitions(self, class_, all=False):
    """Parse multiple definitions and yield them."""
    # Walks the token stream, yielding parsed definitions until this
    # suite is closed by a DEDENT (or the stream is exhausted).
    # *class_* is the enclosing definition node; *all* enables special
    # handling of a module-level __all__ assignment.
    while self.current is not None:
        self.log.debug("parsing definition list, current token is %r (%s)",
                       self.current.kind, self.current.value)
        self.log.debug('got_newline: %s', self.stream.got_logical_newline)
        if all and self.current.value == '__all__':
            # Module-level __all__ assignment: record the public names.
            self.parse_all()
        elif (self.current.kind == tk.OP and
              self.current.value == '@' and
              self.stream.got_logical_newline):
            # A decorator opening a logical line; collect it so it can be
            # attached to the def/class that follows.
            self.consume(tk.OP)
            self.parse_decorators()
        elif self.current.value in ['def', 'class']:
            # Nested definition; class_._nest() picks the right node type.
            yield self.parse_definition(class_._nest(self.current.value))
        elif self.current.kind == tk.INDENT:
            # One level deeper: recurse and re-yield everything found.
            self.consume(tk.INDENT)
            for definition in self.parse_definitions(class_):
                yield definition
        elif self.current.kind == tk.DEDENT:
            # End of this suite: hand control back to the caller.
            self.consume(tk.DEDENT)
            return
        elif self.current.value == 'from':
            self.parse_from_import_statement()
        else:
            # Anything else is irrelevant here; skip one token.
            self.stream.move()
Example #19
Source File: inspect.py From RevitBatchProcessor with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #20
Source File: reindent.py From guider with GNU General Public License v2.0 | 5 votes |
def tokeneater(self, type, token, (sline, scol), end, line, INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT, NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT, NL=tokenize.NL):
Example #21
Source File: inspect.py From CTFCrackTools with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #22
Source File: inspect.py From CTFCrackTools with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #23
Source File: reindent.py From android_universal with MIT License | 5 votes |
def tokeneater(self, type, token, slinecol, end, line,
               INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):
    """Track indentation statistics for one token.

    Appends ``(line_number, indent_level)`` to ``self.stats`` for the
    first token of every program statement, and ``(line_number, -1)``
    for comment lines met while searching for the next statement.  The
    token-type defaults are bound once at definition time.
    """
    if type in (NEWLINE, INDENT, DEDENT):
        # Any of these means a program statement (or ENDMARKER) will
        # eventually follow, after some (possibly empty) run of tokens
        # of the form (NL | COMMENT)* (INDENT | DEDENT+)?
        self.find_stmt = 1
        if type == INDENT:
            self.level += 1
        elif type == DEDENT:
            self.level -= 1
    elif type == COMMENT:
        if self.find_stmt:
            self.stats.append((slinecol[0], -1))
            # Still looking for a new statement, so find_stmt stays set.
    elif type == NL:
        pass
    elif self.find_stmt:
        # First "real" token after a NEWLINE: it starts the next program
        # statement, or is the ENDMARKER (whose line is empty).
        self.find_stmt = 0
        if line:  # not the endmarker
            self.stats.append((slinecol[0], self.level))

# Count number of leading blanks.
Example #24
Source File: minify.py From mitogen with BSD 3-Clause "New" or "Revised" License | 5 votes |
def strip_docstrings(tokens):
    """
    Replace docstring tokens with NL tokens in a `tokenize` stream.

    Any STRING token not part of an expression is deemed a docstring.
    Indented docstrings are not yet recognised.
    """
    buffered = []
    state = 'wait_string'
    for tok in tokens:
        kind = tok[0]
        if state == 'wait_string':
            if kind in (tokenize.NL, tokenize.COMMENT):
                # Blank and comment lines pass straight through.
                yield tok
            elif kind in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
                # Might be the start of a docstring; hold on to it.
                buffered.append(tok)
            elif kind == tokenize.NEWLINE:
                # A bare STRING statement: it was a docstring.  Emit one
                # NL per source line it covered, then re-emit any held
                # indentation tokens shifted below the blanked lines.
                buffered.append(tok)
                first_row = buffered[0][2][0]
                last_row = buffered[-1][3][0] + 1
                for row in range(first_row, last_row):
                    yield tokenize.NL, '\n', (row, 0), (row, 1), '\n'
                for held in buffered:
                    if held[0] in (tokenize.DEDENT, tokenize.INDENT):
                        yield (held[0], held[1],
                               (row + 1, held[2][1]),
                               (row + 1, held[3][1]),
                               held[4])
                del buffered[:]
            else:
                # The STRING was part of an expression after all: flush
                # everything held so far plus the current token.
                buffered.append(tok)
                for held in buffered:
                    yield held
                del buffered[:]
                state = 'wait_newline'
        elif state == 'wait_newline':
            if kind == tokenize.NEWLINE:
                state = 'wait_string'
            yield tok
Example #25
Source File: reindent.py From odoo13-x64 with GNU General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, slinecol, end, line,
               INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT,
               NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT,
               NL=tokenize.NL):
    """Track indentation statistics for one token.

    Appends ``(line_number, indent_level)`` to ``self.stats`` for the
    first token of every program statement, and ``(line_number, -1)``
    for comment lines met while searching for the next statement.  The
    token-type defaults are bound once at definition time.
    """
    if type in (NEWLINE, INDENT, DEDENT):
        # Any of these means a program statement (or ENDMARKER) will
        # eventually follow, after some (possibly empty) run of tokens
        # of the form (NL | COMMENT)* (INDENT | DEDENT+)?
        self.find_stmt = 1
        if type == INDENT:
            self.level += 1
        elif type == DEDENT:
            self.level -= 1
    elif type == COMMENT:
        if self.find_stmt:
            self.stats.append((slinecol[0], -1))
            # Still looking for a new statement, so find_stmt stays set.
    elif type == NL:
        pass
    elif self.find_stmt:
        # First "real" token after a NEWLINE: it starts the next program
        # statement, or is the ENDMARKER (whose line is empty).
        self.find_stmt = 0
        if line:  # not the endmarker
            self.stats.append((slinecol[0], self.level))

# Count number of leading blanks.
Example #26
Source File: inspect.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    start_row, start_col = srow_scol
    end_row, end_col = erow_ecol
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = start_row
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock
Example #27
Source File: reindent.py From datafari with Apache License 2.0 | 5 votes |
def tokeneater(self, type, token, (sline, scol), end, line, INDENT=tokenize.INDENT, DEDENT=tokenize.DEDENT, NEWLINE=tokenize.NEWLINE, COMMENT=tokenize.COMMENT, NL=tokenize.NL):
Example #28
Source File: config_parser.py From gin-config with Apache License 2.0 | 5 votes |
def _skip_whitespace_and_comments(self):
    """Advance past tokens that carry no configuration content.

    Comments, non-logical newlines and indentation changes are all
    irrelevant to the config grammar, so consume tokens until a
    meaningful one is current.
    """
    skippable = (tokenize.COMMENT, tokenize.NL, tokenize.INDENT, tokenize.DEDENT)
    while self._current_token.kind in skippable:
        self._advance_one_token()
Example #29
Source File: recipe-496893.py From code with MIT License | 5 votes |
def tabify(filename):
    # Rewrite *filename* in place, re-indenting every physical line with
    # one tab per INDENT level.  The original file is kept as
    # "<filename>.bak" and the original file mode is restored at the end.
    # NOTE: Python 2 code — uses the file() builtin.
    mode = os.stat(filename)[ST_MODE]
    os.rename(filename, filename+".bak")
    infile = file(filename+".bak")
    outfile = file(filename,"w")
    tokens = tokenize.generate_tokens(infile.readline)
    text = []
    indent = 0
    minlineno = 0
    for (toktype, token, start, end, line) in tokens:
        y, x = end
        if toktype == tokenize.INDENT:
            indent += 1
        elif toktype == tokenize.DEDENT:
            indent -= 1
        elif y > minlineno:
            # First token ending on a new physical line: emit that whole
            # line once, stripped and re-indented with tabs.
            minlineno = y
            # NOTE(review): text is a list, so += extends it with the
            # string's individual characters; the final "".join() still
            # reassembles the intended output.
            text += "%s%s\n" % ("\t"*indent,line.strip())
    outfile.write("".join(text))
    infile.close()
    outfile.close()
    os.chmod(filename, mode)
Example #30
Source File: inspect.py From ironpython3 with Apache License 2.0 | 5 votes |
def tokeneater(self, type, token, srowcol, erowcol, line):
    """Consume one token; raise EndOfBlock once the first block has ended.

    State machine used to delimit the first def/class/lambda in a token
    stream.  Mutates ``self.started``, ``self.passline``,
    ``self.islambda``, ``self.indent`` and ``self.last``.
    """
    if not self.started:
        # Still scanning for the first "def", "class" or "lambda".
        if token in ("def", "class", "lambda"):
            if token == "lambda":
                self.islambda = True
            self.started = True
        self.passline = True  # swallow the rest of the header line
    elif type == tokenize.NEWLINE:
        self.passline = False  # the skipped header line is over
        self.last = srowcol[0]
        if self.islambda:
            # A lambda is a single expression: the first NEWLINE ends it.
            raise EndOfBlock
    elif self.passline:
        pass
    elif type == tokenize.INDENT:
        self.indent += 1
        self.passline = True
    elif type == tokenize.DEDENT:
        self.indent -= 1
        # A DEDENT back to (or past) the starting level closes the block.
        # This only works for "def"/"class" blocks, not e.g. for
        # "if: else:" or "try: finally:" blocks.
        if self.indent <= 0:
            raise EndOfBlock
    elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
        # Any real token back at the outermost level also ends the block;
        # the COMMENT and NL pseudo-tokens do not.
        raise EndOfBlock