Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
from enum import Enum

from jf.parser.lexer_base import LexerBase, LexerError, LexerGrammar
class TokenType(Enum):
    """Token categories produced by this module's lexer.

    Values are the integer codes the grammar table maps rules onto;
    ``EOF`` is the sentinel the lexer emits at end of input.
    """

    Identifier = 0    # names: letters/underscore, then alphanumerics
    IntLiteral = 1    # decimal integers
    FloatLiteral = 2  # decimal numbers with fraction and/or exponent
    K_Namespace = 3   # the `namespace` keyword
    EOF = -1          # end-of-input sentinel
# Lexer rule table: (regex, token type) pairs tried in order, with
# TokenType.EOF as the end-of-input sentinel.
#
# Rule order matters: the `namespace` keyword must precede the generic
# identifier rule, and the float rule must precede the int rule.
#
# Fixes over the original table:
#  1. The float pattern previously made both the fraction and the exponent
#     optional, so a plain integer like "123" matched FloatLiteral and the
#     IntLiteral rule was unreachable. FloatLiteral now requires a fraction
#     part and/or an exponent.
#  2. The integer alternation "0|[1-9][0-9]*" was ungrouped inside the float
#     pattern; Python `re` alternation is first-match, so "0.5" matched only
#     "0" and the fraction never applied to the zero branch. The integer part
#     is now grouped as "(0|[1-9][0-9]*)".
grammar = LexerGrammar(
    [
        ("namespace", TokenType.K_Namespace),
        ("[a-zA-Z_][a-zA-Z_0-9]*", TokenType.Identifier),
        # integer part, then: fraction (optional exponent) OR bare exponent
        ("(0|[1-9][0-9]*)([.][0-9]+([eE][+-]?[0-9]+)?|[eE][+-]?[0-9]+)",
         TokenType.FloatLiteral),
        ("0|[1-9][0-9]*", TokenType.IntLiteral),
    ],
    TokenType.EOF,
)
def tokenize(source):
    """Lazily yield the tokens of *source*.

    Builds a fresh ``LexerBase`` over the module-level ``grammar``, feeds it
    *source*, and delegates to its token stream.
    """
    scanner = LexerBase(grammar)
    scanner.set_input(source)
    yield from scanner.tokens()
Add Comment
Please, Sign In to add comment