Python ply.lex.LexToken() Examples

The following are 24 code examples of ply.lex.LexToken(), collected from open-source projects. The source file, project, and license for each example are noted above it.
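Before the examples, a minimal sketch of where LexToken instances come from: every token a PLY lexer produces is a ply.lex.LexToken carrying four attributes, type, value, lineno, and lexpos. The toy lexer below is illustrative, not taken from any of the projects cited here.

import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('1 + 22')
while True:
    tok = lexer.token()   # tok is a ply.lex.LexToken, or None at end of input
    if tok is None:
        break
    print(tok.type, tok.value, tok.lineno, tok.lexpos)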
Example #1
Source File: _mof_compiler.py    From pywbem with GNU Lesser General Public License v2.1
def __init__(self, msg, parser_token=None, cim_error=None):
        """
        Parameters:

          msg (:term:`string`):
            Message text describing the error.

          parser_token (lex.LexToken or yacc.YaccProduction):
            PLY lex or yacc parser token (that is, the ``p`` argument of a yacc
            parser function or the ``t`` argument of a lex parser function).
            This token is used to obtain the MOF source text and location
            information.

            If `None`, no MOF source text or location information will
            be obtained.

          cim_error (:class:`~pywbem.CIMError`):
            CIM error returned by the CIM repository.
        """
        super(MOFRepositoryError, self).__init__(msg, parser_token)
        self._cim_error = cim_error 
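A hypothetical call site for this exception; the production rule, the handle attribute, and the CreateClass call are illustrative stand-ins, not pywbem's actual code:

def p_createClass(p):
    """createClass : classDeclaration"""
    try:
        p.parser.handle.CreateClass(p[1])   # illustrative repository call
    except CIMError as ce:
        # Attach the yacc production so the error carries MOF file/line info.
        raise MOFRepositoryError(
            "Cannot create class in the CIM repository",
            parser_token=p,
            cim_error=ce,
        )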
Example #2
Source File: yaccparser.py    From epater with GNU General Public License v3.0
def p_datainst3op_error(p):
    """datainst3op : OPDATA3OP logmnemonic flagscondandspace REG error REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG
                   | OPDATA3OP logmnemonic flagscondandspace REG error COMMA REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error COMMA op2"""
    if len(p) == 9:
        raise YaccError("Registers and/or constants used in an operation must be separated by a comma")
    elif len(p) == 7:
        raise YaccError("The {} instruction requires 3 arguments".format(p[1]))
    elif len(p) == 10:
        if isinstance(p[5], LexToken):
            raise YaccError("Register R{}{} does not exist".format(p[4], p[5].value))
        else:
            raise YaccError("Register R{}{} does not exist".format(p[6], p[7].value))
    elif len(p) == 11:
        raise YaccError("TEST")
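Each alternative in the docstring grammar above uses PLY's special error token, which matches the point where the parser detected a syntax error; len(p) then tells the handler which alternative was matched, so the message can name the separator or argument the user omitted.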
Example #3
Source File: fst.py    From dotenv-linter with MIT License
def from_token(
        cls: Type[TAssign],
        name_token: lex.LexToken,
        equal_token: lex.LexToken = None,
        value_token: lex.LexToken = None,
    ) -> TAssign:
        """Creates instance from parser's token."""
        if equal_token is None:
            raise ValueError('Empty EQUAL node is not allowed')

        if value_token is None:
            value_item = None
        else:
            value_item = Value.from_token(value_token)

        return cls(
            left=Name.from_token(name_token),
            right=value_item,
            lineno=name_token.lineno,
            raw_text=equal_token.value,
        ) 
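A hedged sketch of feeding this factory by hand; normally the tokens come from the ply lexer, and Assign (with Name and Value) is assumed to be importable from the same fst module:

import ply.lex as lex

def make_token(type_, value, lineno=1, lexpos=0):
    # LexToken has no constructor, so attributes are assigned by hand.
    token = lex.LexToken()
    token.type = type_
    token.value = value
    token.lineno = lineno
    token.lexpos = lexpos
    return token

assign = Assign.from_token(
    name_token=make_token('NAME', 'API_KEY'),
    equal_token=make_token('EQUAL', '=', lexpos=7),
    value_token=make_token('VALUE', 'secret', lexpos=8),
)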
Example #4
Source File: zxbasmpplex.py    From zxbasic with GNU General Public License v3.0
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()  # Creates the token
        result.value = self.put_current_line()
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
Example #5
Source File: zxbpplex.py    From zxbasic with GNU General Public License v3.0
def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()
        result.value = self.put_current_line(suffix='\n')
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result 
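Examples #4 and #5 are near-identical: both restore the including file's lexer and input data from filestack, increment that file's line counter, and synthesize an _ENDFILE_ marker token using the constructor-less LexToken pattern shown again in Example #14; they differ only in the suffix passed to put_current_line().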
Example #6
Source File: zxbpplex.py    From zxbasic with GNU General Public License v3.0
def token(self):
        """ Returns a token from the current input. If tok is None
        from the current input, it means we are at end of current input
        (e.g. at end of include file). If so, closes the current input
        and discards it; then pops the previous input and lexer from
        the input stack, and gets another token.

        If new token is again None, repeat the process described above
        until the token is either not None, or self.lex is None, wich
        means we must effectively return None, because parsing has
        ended.
        """
        tok = None
        if self.next_token is not None:
            tok = lex.LexToken()
            tok.value = ''
            tok.lineno = self.lex.lineno
            tok.lexpos = self.lex.lexpos
            tok.type = self.next_token
            self.next_token = None

        while self.lex is not None and tok is None:
            tok = self.lex.token()
            if tok is not None:
                break

            tok = self.include_end()

        return tok 
Example #7
Source File: test_mof_compiler.py    From pywbem with GNU Lesser General Public License v2.1
def lex_token(type_, value, lineno, lexpos):
        """Return an expected LexToken."""
        tok = lex.LexToken()
        tok.type = type_
        tok.value = value
        tok.lineno = lineno
        tok.lexpos = lexpos
        return tok 
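A hedged sketch of how such a helper is used in a test; since LexToken does not define __eq__, the comparison is done attribute by attribute (the lexer under test and the expected values are illustrative):

expected = lex_token('IDENTIFIER', 'Sample_Class', 1, 0)
actual = lexer.token()   # `lexer` is the lex.Lexer instance under test
assert (actual.type, actual.value, actual.lineno, actual.lexpos) == \
    (expected.type, expected.value, expected.lineno, expected.lexpos)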
Example #8
Source File: _mof_compiler.py    From pywbem with GNU Lesser General Public License v2.1
def _value(token):
    """
    Return the value of the (Lex or Yacc) token.
    """
    if isinstance(token, lex.LexToken):
        value = token.value
    else:
        assert isinstance(token, yacc.YaccProduction)
        value = token[1]  # always first item in grammar
    return value 
Example #9
Source File: _mof_compiler.py    From pywbem with GNU Lesser General Public License v2.1
def _lexpos(token):
    """
    Return the position in the (Lex or Yacc) token.
    """
    if isinstance(token, lex.LexToken):
        lexpos = token.lexpos
    else:
        assert isinstance(token, yacc.YaccProduction)
        lexpos = token.lexpos(1)  # always first item in grammar
    assert isinstance(lexpos, int)
    return lexpos 
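A hedged sketch of the LexToken branch of _value() and _lexpos(); a yacc.YaccProduction cannot easily be built by hand, so only the lex side is exercised:

import ply.lex as lex

tok = lex.LexToken()
tok.type = 'IDENTIFIER'
tok.value = 'Sample_Class'
tok.lineno = 1
tok.lexpos = 10

assert _value(tok) == 'Sample_Class'
assert _lexpos(tok) == 10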
Example #10
Source File: _mof_compiler.py    From pywbem with GNU Lesser General Public License v2.1
def __init__(self, msg, parser_token=None):
        """
        Parameters:

          msg (:term:`string`):
            Message text describing the error.

          parser_token (lex.LexToken or yacc.YaccProduction):
            PLY lex or yacc parser token (that is, the ``p`` argument of a yacc
            parser function or the ``t`` argument of a lex parser function).
            This token is used to obtain the MOF source text and location
            information.

            If `None`, no MOF source text or location information will
            be obtained.
        """
        assert msg is not None
        self._msg = msg
        if parser_token is None:
            self.args = (None, None, None, None)
        else:
            assert isinstance(
                parser_token,
                (lex.LexToken, yacc.YaccProduction))
            mof_ = parser_token.lexer.parser.mof
            self.args = (parser_token.lexer.lineno,
                         _find_column(mof_, parser_token),
                         parser_token.lexer.parser.file,
                         _get_error_context(mof_, parser_token)) 
Example #11
Source File: yaccparser.py    From epater with GNU General Public License v3.0
def p_datainst2op_error(p):
    """datainst2op : OPDATA2OP logmnemonic flagscondandspace error COMMA op2
                   | OPDATA2OP logmnemonic flagscondandspace REG error op2
                   | OPDATA2OP logmnemonic flagscondandspace REG error COMMA op2"""

    if len(p) == 8:
        raise YaccError("Register R{}{} does not exist".format(p[4], p[5].value))
    elif isinstance(p[4], LexToken):
        raise YaccError("The {} instruction requires a register as its first argument".format(p[1]))
    else:
        raise YaccError("Registers and/or constants used in an operation must be separated by a comma")
Example #12
Source File: asttypes.py    From calmjs.parse with MIT License
def set_comments(self, p, idx):
        """
        Set the comments associated with the element referenced by idx
        inside the provided production rule onto this node.  Only
        applicable if the element is a LexToken and its hidden_tokens
        attribute is set.
        """

        comments = []
        # reversing the order to capture the first pos in the
        # laziest way possible.
        for token in reversed(getattr(p.slice[idx], 'hidden_tokens', [])):
            if token.type == 'LINE_COMMENT':
                comment = LineComment(token.value)
            elif token.type == 'BLOCK_COMMENT':
                comment = BlockComment(token.value)
            else:
                continue  # pragma: no cover

            # short-circuit the setpos only
            pos = (token.lexpos, token.lineno, token.colno)
            comment.lexpos, comment.lineno, comment.colno = pos
            comment._token_map = {token.value: [pos]}
            comments.append(comment)

        if comments:
            self.comments = Comments(list(reversed(comments)))
            (self.comments.lexpos, self.comments.lineno,
                self.comments.colno) = pos 
Example #13
Source File: asttypes.py    From calmjs.parse with MIT License
def setpos(self, p, idx=1, additional=()):
        """
        This takes a production produced by the lexer and set various
        attributes for this node, such as the positions in the original
        source that defined this token, along with other "hidden" tokens
        that should be converted into comment nodes associated with this
        node.
        """

        self._token_map = defaultdict(list)

        # only do so if the lexer has comments enabled, and the
        # production at the index actually has a token provided (which
        # presumes that this is the lowest level node being produced).
        if p.lexer.with_comments and isinstance(p.slice[idx], LexToken):
            self.set_comments(p, idx)

        self.lexpos, self.lineno, self.colno = self.findpos(p, idx)
        for i, token in enumerate(p):
            if not isinstance(token, str):
                continue
            self._token_map[token].append(self.findpos(p, i))

        for token, i in additional:
            self._token_map[token].append(self.findpos(p, i))

        # the very ugly debugger invocation for locating the special
        # cases that are required

        # if not self.lexpos and not self.lineno:
        #     print('setpos', self.__class__.__name__, p.stack,
        #           self.lexpos, self.lineno, self.colno)
        #     # uncomment when yacc_tracking is True
        #     # import pdb;pdb.set_trace()
        #     # uncomment when yacc_tracking is False
        #     # import sys
        #     # from traceback import extract_stack
        #     # _src = extract_stack(sys._getframe(1), 1)[0].line
        #     # if '# require yacc_tracking' not in _src:
        #     #     import pdb;pdb.set_trace() 
Example #14
Source File: lexer.py    From stone with MIT License
def _create_token(token_type, value, lineno, lexpos):
    """
    Helper for creating ply.lex.LexToken objects. Unfortunately, LexToken
    does not define a constructor that would make setting these values easy.
    """
    token = lex.LexToken()
    token.type = token_type
    token.value = value
    token.lineno = lineno
    token.lexpos = lexpos
    return token 
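Usage is then a single call; the values here are illustrative:

newline_token = _create_token('NEWLINE', '\n', 3, 42)
print(newline_token.type, repr(newline_token.value))   # NEWLINE '\n'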
Example #15
Source File: lexer.py    From stone with MIT License
def token(self):
        """
        Returns the next LexToken. Returns None when all tokens have been
        exhausted.
        """

        if self.tokens_queue:
            self.last_token = self.tokens_queue.pop(0)
        else:
            r = self.lex.token()
            if isinstance(r, MultiToken):
                self.tokens_queue.extend(r.tokens)
                self.last_token = self.tokens_queue.pop(0)
            else:
                if r is None and self.cur_indent > 0:
                    if (self.last_token and
                            self.last_token.type not in ('NEWLINE', 'LINE')):
                        newline_token = _create_token(
                            'NEWLINE', '\n', self.lex.lineno, self.lex.lexpos)
                        self.tokens_queue.append(newline_token)
                    dedent_count = self.cur_indent
                    dedent_token = _create_token(
                        'DEDENT', '\t', self.lex.lineno, self.lex.lexpos)
                    self.tokens_queue.extend([dedent_token] * dedent_count)

                    self.cur_indent = 0
                    self.last_token = self.tokens_queue.pop(0)
                else:
                    self.last_token = r
        return self.last_token 
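The design mirrors what Python's own tokenizer does at end of file: when the wrapped lexer is exhausted (r is None) while indentation is still open, the wrapper first queues a synthetic NEWLINE (unless the last token already ended the line) and then one DEDENT per open indentation unit, so the grammar never has to special-case input that ends without a trailing newline.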
Example #16
Source File: lexer.py    From dotenv-linter with MIT License
def token(self) -> lex.LexToken:
        """
        Returns the next token to work with.

        Should not be called directly, since it is a part of ``ply`` API.
        """
        return self._lexer.token() 
Example #17
Source File: zxbasmpplex.py    From zxbasic with GNU General Public License v3.0
def token(self):
        """ Returns a token from the current input. If tok is None
        from the current input, it means we are at end of current input
        (e.g. at end of include file). If so, closes the current input
        and discards it; then pops the previous input and lexer from
        the input stack, and gets another token.

        If new token is again None, repeat the process described above
        until the token is either not None, or self.lex is None, wich
        means we must effectively return None, because parsing has
        ended.
        """
        tok = None
        if self.next_token is not None:
            tok = lex.LexToken()
            tok.value = ''
            tok.lineno = self.lex.lineno
            tok.lexpos = self.lex.lexpos
            tok.type = self.next_token
            self.next_token = None

        while self.lex is not None and tok is None:
            tok = self.lex.token()
            if tok is not None:
                break

            tok = self.include_end()

        return tok 
Example #18
Source File: parser.py    From dotenv-linter with MIT License
def _get_token(
    parsed: yacc.YaccProduction,
    index: int,
) -> Optional[lex.LexToken]:  # TODO: lex.LexToken is in fact just `Any`
    """YaccProduction has a broken __getitem__ method definition."""
    return parsed.slice[index] 
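A hedged sketch of calling this helper inside a yacc action; the grammar rule is illustrative, not dotenv-linter's actual grammar. Plain parsed[1] would yield only the token's value, while _get_token returns the raw LexToken with its position attributes:

def p_assign(parsed):
    """assign : NAME EQUAL VALUE"""
    name_token = _get_token(parsed, 1)
    print(name_token.lineno, name_token.lexpos, name_token.value)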
Example #19
Source File: fst.py    From dotenv-linter with MIT License
def from_token(cls: Type[TNode], token: lex.LexToken) -> TNode:
        """Creates instance from parser's token."""
        return cls(
            lineno=token.lineno,
            raw_text=token.value,
        ) 
Example #20
Source File: lexer.py    From dotenv-linter with MIT License
def t_ANY_error(self, token: lex.LexToken) -> None:
        """
        Error handling rule.

        Raises an exception that file can not be parsed.
        """
        raise ParsingError(token.value) 
Example #21
Source File: lexer.py    From dotenv-linter with MIT License
def t_value_VALUE(self, token: lex.LexToken) -> lex.LexToken:
        """Parsing VALUE tokens."""
        token.lexer.pop_state()
        return token 
Example #22
Source File: lexer.py    From dotenv-linter with MIT License
def t_name_EQUAL(self, token: lex.LexToken) -> lex.LexToken:
        """Parsing EQUAL tokens."""
        token.lexer.push_state('value')
        return token 
Example #23
Source File: lexer.py    From dotenv-linter with MIT License
def t_COMMENT(self, token: lex.LexToken) -> lex.LexToken:
        """Parsing COMMENT tokens."""
        return token 
Example #24
Source File: lexer.py    From dotenv-linter with MIT License
def t_NAME(self, token: lex.LexToken) -> lex.LexToken:
        """Parsing NAME tokens."""
        token.lexer.push_state('name')
        return token
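Examples #20 through #24 only make sense together, so here is a minimal, self-contained sketch (not the actual dotenv-linter lexer) of how such state-switching rules cooperate: EQUAL pushes the exclusive 'value' state so that everything up to the end of the line is captured as one VALUE token, which then pops back to the initial state.

import ply.lex as lex

tokens = ('NAME', 'EQUAL', 'VALUE')

# While 'value' is active, only t_value_* (and t_ANY_*) rules apply.
states = (('value', 'exclusive'),)

t_ignore = ' \t'
t_value_ignore = ''   # inside a value, nothing is skipped

def t_NAME(t):
    r'[A-Za-z_][A-Za-z0-9_]*'
    return t

def t_EQUAL(t):
    r'='
    t.lexer.push_state('value')
    return t

def t_value_VALUE(t):
    r'[^\n]+'
    t.lexer.pop_state()   # back to the INITIAL state
    return t

def t_ANY_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

def t_ANY_error(t):
    raise ValueError('Cannot parse: {0!r}'.format(t.value))

lexer = lex.lex()
lexer.input('API_KEY=secret value\n')
for tok in lexer:
    print(tok.type, tok.value)   # NAME API_KEY / EQUAL = / VALUE secret value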