Searched refs:tokenize (Results 1 - 14 of 14) sorted by relevance
/gem5/src/unittest/tokentest.cc
    55: tokenize(tokens1, test, token, false);
    69: tokenize(tokens2, test, token, true);

/gem5/src/base/match.cc
    56: tokenize(tokens[0], expr, '.');
    67: tokenize(tokens[i], expr[i], '.');
    79: tokenize(name_tokens, name, '.');

/gem5/src/base/str.cc
    69: tokenize(vector<string>& v, const string &s, char token, bool ignore)   [function]

/gem5/src/base/statistics.cc
    179: tokenize(vec, name, '.');
    239: tokenize(v1, name1, '.');
    240: tokenize(v2, name2, '.');

/gem5/src/base/str.hh
    102: tokenize(std::vector<std::string> &vector, const std::string &s,

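The str.cc definition and str.hh declaration above give the shape of gem5's string splitter: append to a vector the pieces of s separated by the single character token. The sketch below is a minimal standalone approximation for illustration, not the gem5 source; in particular, the meaning of the ignore flag (skipping empty fields from consecutive delimiters, exercised with false and true in tokentest.cc above) is an assumption.

    // Standalone sketch with the same shape as the tokenize() declared in
    // src/base/str.hh above. Assumption: ignore == true drops empty fields.
    #include <iostream>
    #include <string>
    #include <vector>

    void
    tokenize(std::vector<std::string> &v, const std::string &s, char token,
             bool ignore = true)
    {
        std::string::size_type first = 0;
        std::string::size_type last = s.find(token);

        while (last != std::string::npos) {
            std::string field = s.substr(first, last - first);
            if (!ignore || !field.empty())
                v.push_back(field);
            first = last + 1;
            last = s.find(token, first);
        }

        std::string field = s.substr(first);
        if (!ignore || !field.empty())
            v.push_back(field);
    }

    int
    main()
    {
        // Mirrors the match.cc and statistics.cc call sites: split a
        // hierarchical name on '.' so components can be compared one by one.
        std::vector<std::string> parts;
        tokenize(parts, "system.cpu.dcache", '.');
        for (const auto &p : parts)
            std::cout << p << '\n';   // system, cpu, dcache
        return 0;
    }
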
/gem5/src/base/inifile.cc
    260: tokenize(unref_ok_entries, entry->getValue(), ' ');

/gem5/ext/googletest/googlemock/scripts/generator/cpp/ast.py
    46:  from cpp import tokenize
    551: if parts[-1].token_type == tokenize.NAME:
    581: if (type_name and type_name[-1].token_type == tokenize.NAME and
    582:     p.token_type == tokenize.NAME):
    583: type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
    739: if token.token_type == tokenize.NAME:
    750: if next.token_type == tokenize.SYNTAX and next.name == '(':
    755: syntax = tokenize.SYNTAX
    764: new_temp = self._GetTokensUpTo(tokenize [all...]

/gem5/src/sim/cxx_config_ini.cc
    63: tokenize(values, value, ' ', true);

/gem5/src/sim/serialize.hh
    502: tokenize(tokens, str, ' ');
    546: tokenize(tokens, str, ' ');
    585: tokenize(tokens, str, ' ');
    615: tokenize(tokens, str, ' ');

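The serialize.hh call sites all split on ' ', which is consistent with an array-valued checkpoint parameter being stored as one space-separated string and converted field by field. The sketch below only illustrates that pattern; splitOnSpaces and the parsing with std::stoull are hypothetical stand-ins, not gem5 code.

    // Illustration of the split-then-convert pattern suggested by the
    // serialize.hh matches above (assumption, not the gem5 implementation).
    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical helper standing in for tokenize(tokens, str, ' ').
    static std::vector<std::string>
    splitOnSpaces(const std::string &str)
    {
        std::vector<std::string> tokens;
        std::istringstream in(str);
        std::string tok;
        while (in >> tok)
            tokens.push_back(tok);
        return tokens;
    }

    int
    main()
    {
        // A value as it might appear in a checkpoint file: one string,
        // one space-separated field per array element.
        std::string str = "4096 8192 12288";

        std::vector<uint64_t> param;
        for (const std::string &tok : splitOnSpaces(str))
            param.push_back(std::stoull(tok));

        for (uint64_t v : param)
            std::cout << v << '\n';
        return 0;
    }
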
/gem5/src/sim/power/mathexpr_powermodel.cc
    55: tokenize(path, name(), '.', true);

/gem5/util/cxx_config/main.cc
    163: tokenize(values, argv[arg_ptr + 2], ',');

/gem5/util/systemc/gem5_within_systemc/main.cc
    226: tokenize(values, argv[2], ',');

/gem5/ext/googletest/googlemock/Makefile.am
    157: scripts/generator/cpp/tokenize.py \

/gem5/ext/ply/ply/cpp.py
    158: # tokenize()
    160: # Utility function. Given a string of text, tokenize into a list of tokens
    163: def tokenize(self,text):   [member in class:Preprocessor]
    536: # tokens = tokenize(line)
    775: tokens = self.tokenize(tokens)

Completed in 25 milliseconds