Searched refs:tokens (Results 1 - 25 of 101) sorted by relevance
/gem5/src/base/

match.cc
    49  tokens.insert(tokens.end(), other.tokens.begin(), other.tokens.end());
    55  tokens.resize(1);
    56  tokenize(tokens[0], expr, '.');
    63  tokens.resize(0);
    65  tokens.resize(expr.size());
    67  tokenize(tokens[i], expr[i], '.');
    82  int num_expr = tokens [all...]

match.hh
    44  std::vector<std::vector<std::string> > tokens;    (member in class:ObjectMatch)
    55  return tokens.empty() ? false : domatch(name);
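The match.cc/match.hh hits above outline gem5's ObjectMatch: each expression is split on '.' into a token list (a vector of vectors of strings), and domatch() compares a candidate object name against every tokenized expression. A minimal Python sketch of that matching idea; the class shape mirrors the snippets, but the '*' wildcard behavior is an assumption for illustration, not gem5's exact semantics:

    # Sketch of dotted-name matching in the style of gem5's ObjectMatch.
    # Assumption: '*' matches any single path component.

    def tokenize(expr):
        """Split a dotted expression like 'system.cpu.*' on '.'."""
        return expr.split('.')

    class ObjectMatch:
        def __init__(self, exprs=None):
            # One token list per expression, mirroring
            # std::vector<std::vector<std::string>> tokens.
            self.tokens = [tokenize(e) for e in (exprs or [])]

        def match(self, name):
            # Mirrors: tokens.empty() ? false : domatch(name)
            if not self.tokens:
                return False
            parts = tokenize(name)
            return any(self._domatch(toks, parts) for toks in self.tokens)

        @staticmethod
        def _domatch(toks, parts):
            if len(toks) != len(parts):
                return False
            return all(t == '*' or t == p for t, p in zip(toks, parts))

    print(ObjectMatch(['system.*.icache']).match('system.cpu0.icache'))  # True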
/gem5/ext/ply/test/

lex_empty.py
    10  tokens = [    (variable)

yacc_rr_unused.py
    12  tokens = ('A', 'B', 'C')    (variable)

lex_many_tokens.py
     3  # Test lex's ability to handle a large number of tokens (beyond the
    11  tokens = ["TOK%d" % i for i in range(1000)]    (variable)
    13  for tok in tokens:

lex_doc1.py
    10  tokens = [    (variable)

lex_error1.py
    10  tokens = [    (variable)

lex_error2.py
    10  tokens = [    (variable)

lex_error3.py
    10  tokens = [    (variable)

lex_literal1.py
    10  tokens = [    (variable)

lex_literal2.py
    10  tokens = [    (variable)

lex_re1.py
    10  tokens = [    (variable)

lex_re2.py
    10  tokens = [    (variable)

lex_rule1.py
    10  tokens = [    (variable)

lex_token2.py
     3  # Tests for tokens of wrong type
    10  tokens = "PLUS MINUS NUMBER"    (variable)

lex_token3.py
     3  # tokens is right type, but is missing a token for one rule
    10  tokens = [    (variable)

lex_token4.py
    10  tokens = [    (variable)

lex_dup1.py
    10  tokens = [    (variable)

lex_dup2.py
    10  tokens = [    (variable)

lex_dup3.py
    10  tokens = [    (variable)

lex_error4.py
    10  tokens = [    (variable)

lex_ignore.py
    10  tokens = [    (variable)

lex_ignore2.py
    10  tokens = [    (variable)

lex_re3.py
    10  tokens = [    (variable)
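All of these test files revolve around PLY's module-level tokens variable: ply.lex expects a list or tuple of token names, each backed by a t_NAME rule (per the matched comments, lex_token2.py passes a plain string, lex_token3.py omits a rule for one token). For reference, a minimal working lexer showing the convention these tests exercise:

    import ply.lex as lex

    # Token names; ply.lex requires a list or tuple here
    # (a plain string like lex_token2.py's is rejected).
    tokens = ('PLUS', 'MINUS', 'NUMBER')

    # Simple tokens defined as regex strings.
    t_PLUS = r'\+'
    t_MINUS = r'-'

    # Tokens needing an action are defined as functions; the
    # docstring holds the regex.
    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    t_ignore = ' \t'

    def t_error(t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input('3 + 4 - 5')
    for tok in lexer:
        print(tok.type, tok.value)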
/gem5/ext/ply/ply/

cpp.py
    13  # Default preprocessor lexer definitions. These tokens are enough to get
    17  tokens = (    (variable)
   113  #   .value - Macro value (a list of tokens)
   149  # Probe the lexer for selected tokens
   160  # Utility function. Given a string of text, tokenize into a list of tokens
   164  tokens = []
   169  tokens.append(tok)
   170  return tokens
   187  # with any suitable lexer regardless of how tokens have been named.
   292  # Remove leading/trailing whitespace tokens from [all...]
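The cpp.py hit at line 160 is a small utility that feeds a string to the lexer and collects every token into a list. Roughly, as reconstructed from the matched lines (a sketch, not the verbatim source, which wraps this in the preprocessor class):

    def tokenize(lexer, text):
        """Run text through a ply.lex lexer and return all tokens as a list."""
        tokens = []
        lexer.input(text)
        while True:
            tok = lexer.token()
            if not tok:
                break           # end of input
            tokens.append(tok)
        return tokens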
Completed in 13 milliseconds