Python ply.lex.lex() Examples

The following are 30 code examples of ply.lex.lex(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module ply.lex, or try the search function.
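Before the project examples, here is a minimal self-contained sketch of the typical workflow (the token names and rules below are illustrative, not taken from any project above): define token rules at module level, call lex.lex() to build the lexer, feed it input, and iterate over the tokens.

import ply.lex as lex

# Token names this lexer produces (illustrative).
tokens = ('NUMBER', 'PLUS', 'MINUS')

# Simple tokens are plain regular-expression strings.
t_PLUS = r'\+'
t_MINUS = r'-'

# Tokens needing an action are functions; the regex is the docstring.
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

t_ignore = ' \t'  # characters skipped silently

def t_error(t):
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()          # builds the lexer from the rules in this module
lexer.input("3 + 4 - 5")
for tok in lexer:          # the lexer is iterable; equivalently, call lexer.token()
    print(tok.type, tok.value)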
Example #1
Source File: cpp.py    From SublimeKSP with GNU General Public License v3.0
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #2
Source File: cpp.py    From rfmt with Apache License 2.0
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #3
Source File: zxbasmpplex.py    From zxbasic with GNU General Public License v3.0
def include(self, filename):
        """ Changes FILENAME and line count
        """
        if filename != STDIN and filename in [x[0] for x in self.filestack]:  # Already included?
            self.warning(' Recursive inclusion')

        self.filestack.append([filename, 1, self.lex, self.input_data])
        self.lex = lex.lex(object=self)
        result = self.put_current_line()  # First #line starts with \n (EOL)

        try:
            if filename == STDIN:
                self.input_data = sys.stdin.read()
            else:
                self.input_data = api.utils.read_txt_file(filename)

            if len(self.input_data) and self.input_data[-1] != EOL:
                self.input_data += EOL
        except IOError:
            self.input_data = EOL

        self.lex.input(self.input_data)
        return result 
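A note on the mechanism: include() pushes the current filename, line number, lexer, and input buffer onto filestack and builds a fresh lexer for the included file; include_end() in the next example pops that state back once the included file is exhausted.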
Example #4
Source File: zxbasmpplex.py    From zxbasic with GNU General Public License v3.0
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()  # Creates the token
        result.value = self.put_current_line()
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
Example #5
Source File: cpp.py    From retdec-regression-tests-framework with MIT License
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #6
Source File: cpp.py    From SwiftKitten with MIT License
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #7
Source File: cpp.py    From Safejumper-for-Desktop with GNU General Public License v2.0
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #8
Source File: zxbpplex.py    From zxbasic with GNU General Public License v3.0
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()
        result.value = self.put_current_line(suffix='\n')
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
Example #9
Source File: cpp.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #10
Source File: cpp.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #11
Source File: cpp.py    From oss-ftp with MIT License
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #12
Source File: prettypfa.py    From hadrian with Apache License 2.0
def ppfas(text, subs={}, **subs2):
    """Parse a string of several PrettyPFA expressions (delimited by semicolons) as a list of PFA abstract syntax trees.

    :type text: string
    :param text: PrettyPFA expressions (delimited by semicolons)
    :type subs: dict from substitution names to substitutions
    :param subs: replacement values as PFA titus.pfaast.Ast, PrettyPFA strings, or PFA Pythonized JSON
    :type subs2: dict from substitution names to substitutions
    :param subs2: added to ``subs`` (a more convenient way to pass them)
    :rtype: list of titus.pfaast.Expression
    :return: parsed expressions as PFA
    """

    subs2.update(subs)

    if not exprParser.initialized:
        try:
            import ply.lex as lex
            import ply.yacc as yacc
        except ImportError:
            raise ImportError("ply (used to parse the PrettyPFA) is not available on your system")
        else:
            exprParser.initialize(lex, yacc)

    return exprParser.parse(text, subs2) 
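The deferred import of ply.lex and ply.yacc is a design choice: titus stays importable on systems without ply, which only becomes a hard requirement the first time a PrettyPFA string is actually parsed.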
Example #13
Source File: ksp_parser.py    From SublimeKSP with GNU General Public License v3.0
def init(outputdir=None):
    outputdir = outputdir or os.path.dirname(__file__)  # os.getcwd()
    current_module = sys.modules[__name__]
    #print (outputdir, current_module)
    debug = 0
    optimize = 0
    lexer = lex.lex(optimize=0, debug=debug)

    # lexer.input('on init\n   declare shared parameter cutoff')
    # while True:
    #     tok = lexer.token()
    #     if tok is None:
    #         break
    #     print (tok)

    return yacc.yacc(method="LALR", optimize=optimize, debug=debug,
                     write_tables=0, module=current_module, start='script',
                     outputdir=outputdir, tabmodule='ksp_parser_tab') 
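Here write_tables=0 keeps PLY from persisting the generated LALR tables to disk (tabmodule names the table module it would otherwise write), and start='script' selects the grammar's start symbol.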
Example #14
Source File: cpp.py    From learn_python3_spider with MIT License
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # ----------------------------------------------------------------------------- 
Example #15
Source File: zxbpplex.py    From zxbasic with GNU General Public License v3.0
def __init__(self):
        """ Creates a new GLOBAL lexer instance
        """
        self.lex = None
        self.filestack = []  # Current filename, and line number being parsed
        self.input_data = ''
        self.tokens = tokens
        self.states = states
        self.next_token = None  # if set to something, this will be returned once
        self.expectingDirective = False  # True if the lexer expects a preprocessor directive
        self.__COMMENT_LEVEL = 0

# --------------------- PREPROCESSOR FUNCTIONS -------------------


# Needed for states 
Example #16
Source File: cpp.py    From SwiftKitten with MIT License
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
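In short, group_lines() splices backslash-continued lines into single logical lines, re-lexes the result on a clone of the lexer, and yields the accumulated token list each time a whitespace token containing a newline is seen. The same function recurs in several cpp.py examples below.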
Example #17
Source File: lexer.py    From tinyquery with MIT License
def get_lexer():
    return lex.lex() 
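With no arguments, lex.lex() builds the lexer from the t_ token rules defined at the top level of the calling module — here, tinyquery's lexer.py, whose t_ID rule appears in Example #22 below.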
Example #18
Source File: cpp.py    From learn_python3_spider with MIT License
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    #
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
Example #19
Source File: qasmlexer.py    From qiskit-terra with Apache License 2.0
def __mklexer__(self, filename):
        """Create a PLY lexer."""
        self.lexer = lex.lex(module=self, debug=False)
        self.filename = filename
        self.lineno = 1

        if filename:
            with open(filename, 'r') as ifile:
                self.data = ifile.read()
            self.lexer.input(self.data) 
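Passing module=self makes lex.lex() collect the t_ rules from the instance's attributes instead of the calling module's globals; this is the standard pattern for packaging a PLY lexer inside a class, and it recurs in Examples #20, #26, #28, and #29 below.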
Example #20
Source File: cli_helpers.py    From stone with MIT License
def __init__(self, debug=False):
        self.lexer = lex.lex(module=self, debug=debug)
        self.errors = [] 
Example #21
Source File: lexer.py    From stone with MIT License
def _create_token(token_type, value, lineno, lexpos):
    """
    Helper for creating ply.lex.LexToken objects. Unfortunately, LexToken
    does not have a constructor defined to make setting these values easy.
    """
    token = lex.LexToken()
    token.type = token_type
    token.value = value
    token.lineno = lineno
    token.lexpos = lexpos
    return token 
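For illustration, a hypothetical call (the 'ID' token type and values are assumed, not taken from stone's grammar):

    tok = _create_token('ID', 'some_name', lineno=1, lexpos=0)
    assert tok.type == 'ID' and tok.value == 'some_name'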
Example #22
Source File: lexer.py    From tinyquery with MIT License
def t_ID(t):
    r"""[a-zA-Z_][a-zA-Z_0-9]*"""
    # Specific tokens should be lower-cased here, but functions can't be
    # lower-cased at lex time since we don't know what IDs are functions vs.
    # columns or tables. We lower-case function names at parse time.
    if t.value.lower() in reserved_words:
        t.value = t.value.lower()
        t.type = reserved_words[t.value.lower()]
    else:
        t.type = 'ID'
    return t 
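The reserved_words mapping is defined elsewhere in tinyquery's lexer; a hypothetical sketch of its shape, assuming keyword strings map to their token-type names (as the lookup `t.type = reserved_words[...]` implies):

    reserved_words = {
        'select': 'SELECT',
        'from': 'FROM',
        'where': 'WHERE',
    }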
Example #23
Source File: cpp.py    From rfmt with Apache License 2.0
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
Example #24
Source File: parser.py    From qkeras with Apache License 2.0
def get_tokens(fn):
  lex.input("".join(open(fn).readlines()))
  return lex.token 
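Note that this returns the module-level lex.token function itself, not a token or a list: after lex.input() has been fed the file contents, the caller invokes the returned function repeatedly to pull tokens until it returns None.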
Example #25
Source File: asmlex.py    From zxbasic with GNU General Public License v3.0
def token(self):
        return self.lex.token() 
Example #26
Source File: path_template.py    From gax-python with BSD 3-Clause "New" or "Revised" License
def __init__(self):
        self.lexer = lex.lex(module=self)
        self.parser = yacc.yacc(module=self, debug=False, write_tables=False) 
Example #27
Source File: cpp.py    From Safejumper-for-Desktop with GNU General Public License v2.0
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ---------------------------------------------------------------------- 
Example #28
Source File: conditionlexer.py    From autoremove-torrents with MIT License
def __init__(self):
        # Build the lexer
        self.lexer = lex.lex(module=self)
        # Set logger
        self._logger = logger.Logger.register(__name__) 
Example #29
Source File: vmem.py    From simplez-fpga with GNU General Public License v2.0
def __init__(self, data):
        """Create the lexer and give the data"""
        self.lexer = lex.lex(module=self)
        self.lexer.input(data)

        # -- Read the first token
        self.current_token = self.lexer.token()

        # -- Address pointer. It contains the current address
        self.addr = 0

    # - Comments are ignored 
Example #30
Source File: cpp.py    From oss-ftp with MIT License
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ----------------------------------------------------------------------