hedit.py (4479:61d3ed46e373) | hedit.py (6498:e21e9ab5fad0) |
---|---|
# -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
# --- 15 unchanged lines hidden (view full) ---

# Tokens

# Whitespace between tokens is skipped by the lexer.
t_ignore = " \t\n"

def t_H_EDIT_DESCRIPTOR(t):
    r"\d+H.*"  # This grabs all of the remaining text
    i = t.value.index('H')
    # The count before 'H' is a plain digit string, so int() suffices.
    # (The original used eval(), which is both unnecessary and unsafe on
    # lexer input.)
    n = int(t.value[:i])

    # Adjust the tokenizing position: the regex swallowed the rest of the
    # line, so push lexpos back to just past the n-character literal.
    t.lexer.lexpos -= len(t.value) - (i + 1 + n)

    # The token's value is exactly the n characters following the 'H'.
    t.value = t.value[i + 1:i + 1 + n]
    return t
def t_error(t):
    """Report an illegal character and skip past it so lexing can continue."""
    # Python 3 print function (the pre-diff code used a Python 2 print
    # statement, which is a syntax error on Python 3).
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
42 | 42 |
# Build the lexer from this module's token rules and run ply's
# command-line test harness over the input.
import ply.lex as lex

lex.lex()
# NOTE(review): runmain() appears to tokenize stdin (or a file given on the
# command line) and print the tokens — confirm against the ply docs.
lex.runmain()