Searched refs:tokens (Results 1 - 25 of 101) sorted by relevance


/gem5/src/base/
match.cc:49    tokens.insert(tokens.end(), other.tokens.begin(), other.tokens.end());
match.cc:55    tokens.resize(1);
match.cc:56    tokenize(tokens[0], expr, '.');
match.cc:63    tokens.resize(0);
match.cc:65    tokens.resize(expr.size());
match.cc:67    tokenize(tokens[i], expr[i], '.');
match.cc:82    int num_expr = tokens
match.cc       [all...]
match.hh:44    std::vector<std::vector<std::string> > tokens;    (member in class ObjectMatch)
match.hh:55    return tokens.empty() ? false : domatch(name);
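The match.cc/match.hh hits are gem5's ObjectMatch: each expression is split on '.' into one row of the tokens member, and a name matches only when tokens is non-empty and domatch(name) succeeds. As a reading aid, here is a hypothetical Python sketch of that shape; the real class is C++, and the component-wise comparison and '*' wildcard below are assumptions, not gem5's actual matching rules.

    # Hypothetical sketch of dotted-path matching in the spirit of
    # gem5's ObjectMatch (the real implementation is C++ in match.cc).
    class ObjectMatchSketch:
        def __init__(self, exprs):
            # tokens: one token list per expression, split on '.'
            self.tokens = [e.split('.') for e in exprs]

        def match(self, name):
            # mirrors: return tokens.empty() ? false : domatch(name);
            if not self.tokens:
                return False
            parts = name.split('.')
            return any(self._domatch(parts, expr) for expr in self.tokens)

        @staticmethod
        def _domatch(parts, expr):
            # assumed semantics: equal length, '*' matches any one component
            if len(parts) != len(expr):
                return False
            return all(e in ('*', p) for p, e in zip(parts, expr))

    m = ObjectMatchSketch(["system.cpu.*"])
    print(m.match("system.cpu.icache"))   # True under the assumed wildcard rule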
/gem5/ext/ply/test/
lex_empty.py:10          tokens = [    (variable)
yacc_rr_unused.py:12     tokens = ('A', 'B', 'C')    (variable)
lex_many_tokens.py:3     # Test lex's ability to handle a large number of tokens (beyond the
lex_many_tokens.py:11    tokens = ["TOK%d" % i for i in range(1000)]    (variable)
lex_many_tokens.py:13    for tok in tokens:
lex_doc1.py:10           tokens = [    (variable)
lex_error1.py:10         tokens = [    (variable)
lex_error2.py:10         tokens = [    (variable)
lex_error3.py:10         tokens = [    (variable)
lex_literal1.py:10       tokens = [    (variable)
lex_literal2.py:10       tokens = [    (variable)
lex_re1.py:10            tokens = [    (variable)
lex_re2.py:10            tokens = [    (variable)
lex_rule1.py:10          tokens = [    (variable)
lex_token2.py:3          # Tests for tokens of wrong type
lex_token2.py:10         tokens = "PLUS MINUS NUMBER"    (variable)
lex_token3.py:3          # tokens is right type, but is missing a token for one rule
lex_token3.py:10         tokens = [    (variable)
lex_token4.py:10         tokens = [    (variable)
lex_dup1.py:10           tokens = [    (variable)
lex_dup2.py:10           tokens = [    (variable)
lex_dup3.py:10           tokens = [    (variable)
lex_error4.py:10         tokens = [    (variable)
lex_ignore.py:10         tokens = [    (variable)
lex_ignore2.py:10        tokens = [    (variable)
lex_re3.py:10            tokens = [    (variable)
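Almost every hit in this test directory is the module-level tokens declaration that ply.lex requires: a sequence naming every token type the lexer may emit, each backed by a t_NAME rule. A minimal self-contained example of that convention (the token names here are illustrative, not taken from any one test):

    import ply.lex as lex

    # Required module-level declaration: every token type the lexer can emit.
    tokens = ('NUMBER', 'PLUS', 'MINUS')

    # Simple tokens are regex strings; tokens needing an action are functions.
    t_PLUS = r'\+'
    t_MINUS = r'-'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    t_ignore = ' \t'    # characters skipped between tokens

    def t_error(t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input("3 + 4 - 5")
    for tok in lexer:
        print(tok.type, tok.value)

The tests mostly vary this template to provoke specific validation errors (a tokens string instead of a list, duplicate rules, a missing token name), while lex_many_tokens.py builds the list programmatically, as its ["TOK%d" % i for i in range(1000)] snippet shows.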
/gem5/ext/ply/ply/
cpp.py:13     # Default preprocessor lexer definitions. These tokens are enough to get
cpp.py:17     tokens = (    (variable)
cpp.py:113    # .value - Macro value (a list of tokens)
cpp.py:149    # Probe the lexer for selected tokens
cpp.py:160    # Utility function. Given a string of text, tokenize into a list of tokens
cpp.py:164    tokens = []
cpp.py:169    tokens.append(tok)
cpp.py:170    return tokens
cpp.py:187    # with any suitable lexer regardless of how tokens have been named.
cpp.py:292    # Remove leading/trailing whitespace tokens fro
cpp.py        [all...]
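The cpp.py comments around lines 160 to 170 describe a small utility that feeds a string to the lexer and collects the emitted tokens into a list (tokens = [] ... tokens.append(tok) ... return tokens). Below is a sketch of that pattern with ply.lex; the tiny token set is an assumption made for the example, not cpp.py's real preprocessor definitions:

    import ply.lex as lex

    # Stand-in lexer for demonstration only; cpp.py's actual token
    # tuple (line 17 above) is larger and preprocessor-specific.
    tokens = ('ID', 'INTEGER', 'WS')

    t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
    t_INTEGER = r'\d+'
    t_WS = r'\s+'

    def t_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()

    def tokenize(text):
        # Given a string of text, tokenize into a list of tokens.
        result = []
        lexer.input(text)
        while True:
            tok = lexer.token()
            if not tok:
                break
            result.append(tok)
        return result

    print([t.type for t in tokenize("foo 42 bar")])
    # ['ID', 'WS', 'INTEGER', 'WS', 'ID']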

Completed in 14 milliseconds
