# isa_parser.py

# Copyright (c) 2003-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
#          Korey Sewell

import os
import sys
import re
import string
import traceback
# get type names
from types import *

# Prepend the directory where the PLY lex & yacc modules are found
# to the search path.  Assumes we're compiling in a subdirectory
# of 'build' in the current tree.
sys.path[0:0] = [os.environ['M5_PLY']]

import lex
import yacc

#####################################################################
#
# Lexer
#
# The PLY lexer module takes two things as input:
#  - A list of token names (the string list 'tokens')
#  - A regular expression describing a match for each token.  The
#    regexp for token FOO can be provided in two ways:
#    - as a string variable named t_FOO
#    - as the doc string for a function named t_FOO.  In this case,
#      the function is also executed, allowing an action to be
#      associated with each token match.
#
#####################################################################

# Reserved words.  These are listed separately as they are matched
# using the same regexp as generic IDs, but distinguished in the
# t_ID() function.  The PLY documentation suggests this approach.
reserved = (
    'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
    'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
    'OUTPUT', 'SIGNED', 'TEMPLATE'
    )

# List of tokens.  The lex module requires this.
tokens = reserved + (
    # identifier
    'ID',

    # integer literal
    'INTLIT',

    # string literal
    'STRLIT',

    # code literal
    'CODELIT',

    # ( ) [ ] { } < > , ; : :: *
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'LESS', 'GREATER', 'EQUALS',
    'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
    'ASTERISK',

    # C preprocessor directives
    'CPPDIRECTIVE'

# The following are matched but never returned. commented out to
# suppress PLY warning
    # newfile directive
#    'NEWFILE',

    # endfile directive
#    'ENDFILE'
)

# Regular expressions for token matching
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LESS = r'\<'
t_GREATER = r'\>'
t_EQUALS = r'='
t_COMMA = r','
t_SEMI = r';'
t_COLON = r':'
t_DBLCOLON = r'::'
t_ASTERISK = r'\*'

# Identifiers and reserved words
reserved_map = { }
for r in reserved:
    reserved_map[r.lower()] = r

def t_ID(t):
    r'[A-Za-z_]\w*'
    t.type = reserved_map.get(t.value,'ID')
    return t

# Integer literal
def t_INTLIT(t):
    r'(0x[\da-fA-F]+)|\d+'
    try:
        t.value = int(t.value,0)
    except ValueError:
        error(t.lineno, 'Integer value "%s" too large' % t.value)
        t.value = 0
    return t

# String literal.  Note that these use only single quotes, and
# can span multiple lines.
def t_STRLIT(t):
    r"(?m)'([^'])+'"
    # strip off quotes
    t.value = t.value[1:-1]
    t.lineno += t.value.count('\n')
    return t


# "Code literal"... like a string literal, but delimiters are
# '{{' and '}}' so they get formatted nicely under emacs c-mode
def t_CODELIT(t):
    r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
    # strip off {{ & }}
    t.value = t.value[2:-2]
    t.lineno += t.value.count('\n')
    return t

def t_CPPDIRECTIVE(t):
    r'^\#[^\#].*\n'
    t.lineno += t.value.count('\n')
    return t

def t_NEWFILE(t):
    r'^\#\#newfile\s+"[\w/.-]*"'
    fileNameStack.push((t.value[11:-1], t.lineno))
    t.lineno = 0

def t_ENDFILE(t):
    r'^\#\#endfile'
    (old_filename, t.lineno) = fileNameStack.pop()

#
# The functions t_NEWLINE, t_ignore, and t_error are
# special for the lex module.
#

# Newlines
def t_NEWLINE(t):
    r'\n+'
    t.lineno += t.value.count('\n')

# Comments
def t_comment(t):
    r'//.*'

# Completely ignored characters
t_ignore = ' \t\x0c'

# Error handler
def t_error(t):
    error(t.lineno, "illegal character '%s'" % t.value[0])
    t.skip(1)

# Build the lexer
lex.lex()

#####################################################################
#
# Parser
#
# Every function whose name starts with 'p_' defines a grammar rule.
# The rule is encoded in the function's doc string, while the
# function body provides the action taken when the rule is matched.
# The argument to each function is a list of the values of the
# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
# on the RHS.  For tokens, the value is copied from the t.value
# attribute provided by the lexer.  For non-terminals, the value
# is assigned by the producing rule; i.e., the job of the grammar
# rule function is to set the value for the non-terminal on the LHS
# (by assigning to t[0]).
#####################################################################

# The LHS of the first grammar rule is used as the start symbol
# (in this case, 'specification').  Note that this rule enforces
# that there will be exactly one namespace declaration, with 0 or more
# global defs/decls before and after it.  The defs & decls before
# the namespace decl will be outside the namespace; those after
# will be inside.  The decoder function is always inside the namespace.
def p_specification(t):
    'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
    global_code = t[1]
    isa_name = t[2]
    namespace = isa_name + "Inst"
    # wrap the decode block as a function definition
    t[4].wrap_decode_block('''
StaticInstPtr
%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
{
    using namespace %(namespace)s;
''' % vars(), '}')
    # both the latter output blocks and the decode block are in the namespace
    namespace_code = t[3] + t[4]
    # pass it all back to the caller of yacc.parse()
    t[0] = (isa_name, namespace, global_code, namespace_code)

# ISA name declaration looks like "namespace <foo>;"
def p_name_decl(t):
    'name_decl : NAMESPACE ID SEMI'
    t[0] = t[2]

# 'opt_defs_and_outputs' is a possibly empty sequence of
# def and/or output statements.
def p_opt_defs_and_outputs_0(t):
    'opt_defs_and_outputs : empty'
    t[0] = GenCode()

def p_opt_defs_and_outputs_1(t):
    'opt_defs_and_outputs : defs_and_outputs'
    t[0] = t[1]

def p_defs_and_outputs_0(t):
    'defs_and_outputs : def_or_output'
    t[0] = t[1]

def p_defs_and_outputs_1(t):
    'defs_and_outputs : defs_and_outputs def_or_output'
    t[0] = t[1] + t[2]

# The list of possible definition/output statements.
def p_def_or_output(t):
    '''def_or_output : def_format
                     | def_bitfield
                     | def_template
                     | def_operand_types
                     | def_operands
                     | output_header
                     | output_decoder
                     | output_exec
                     | global_let'''
    t[0] = t[1]

# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
# directly to the appropriate output section.

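# For example (illustrative; the declaration below is made up, not taken
# from any real ISA description), a block such as
#
#       output header {{
#               std::string inst2string(MachInst machInst);
#       }};
#
# copies the enclosed C++ verbatim into the header output stream
# (destined for decoder.hh).
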
# Protect any non-dict-substitution '%'s in a format string
# (i.e. those not followed by '(')
def protect_non_subst_percents(s):
    return re.sub(r'%(?!\()', '%%', s)

# Massage output block by substituting in template definitions and bit
# operators.  We handle '%'s embedded in the string that don't
# indicate template substitutions (or CPU-specific symbols, which get
# handled in GenCode) by doubling them first so that the format
# operation will reduce them back to single '%'s.
def process_output(s):
    s = protect_non_subst_percents(s)
    # protects cpu-specific symbols too
    s = protect_cpu_symbols(s)
    return substBitOps(s % templateMap)

def p_output_header(t):
    'output_header : OUTPUT HEADER CODELIT SEMI'
    t[0] = GenCode(header_output = process_output(t[3]))

def p_output_decoder(t):
    'output_decoder : OUTPUT DECODER CODELIT SEMI'
    t[0] = GenCode(decoder_output = process_output(t[3]))

def p_output_exec(t):
    'output_exec : OUTPUT EXEC CODELIT SEMI'
    t[0] = GenCode(exec_output = process_output(t[3]))

# global let blocks 'let {{...}}' (Python code blocks) are executed
# directly when seen.  Note that these execute in a special variable
# context 'exportContext' to prevent the code from polluting this
# script's namespace.
def p_global_let(t):
    'global_let : LET CODELIT SEMI'
    updateExportContext()
    try:
        exec fixPythonIndentation(t[2]) in exportContext
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in global let block "%s".' % (exc, t[2]))
    t[0] = GenCode() # contributes nothing to the output C++ file

# Define the mapping from operand type extensions to C++ types and bit
# widths (stored in operandTypeMap).
def p_def_operand_types(t):
    'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
    try:
        userDict = eval('{' + t[3] + '}')
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operand_types block "%s".' % (exc, t[3]))
    buildOperandTypeMap(userDict, t.lineno(1))
    t[0] = GenCode() # contributes nothing to the output C++ file

# Define the mapping from operand names to operand classes and other
# traits.  Stored in operandNameMap.
def p_def_operands(t):
    'def_operands : DEF OPERANDS CODELIT SEMI'
    if not globals().has_key('operandTypeMap'):
        error(t.lineno(1),
              'error: operand types must be defined before operands')
    try:
        userDict = eval('{' + t[3] + '}')
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operands block "%s".' % (exc, t[3]))
    buildOperandNameMap(userDict, t.lineno(1))
    t[0] = GenCode() # contributes nothing to the output C++ file

# A bitfield definition looks like:
# 'def [signed] bitfield <ID> [<first>:<last>]'
# This generates a preprocessor macro in the output file.
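# For example (illustrative), 'def bitfield OPCODE <31:26>;' expands to
#       #undef OPCODE
#       #define OPCODE  bits(machInst, 31, 26)
# in the header output.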
def p_def_bitfield_0(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
    if (t[2] == 'signed'):
        expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
def p_def_bitfield_1(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
    if (t[2] == 'signed'):
        expr = 'sext<%d>(%s)' % (1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

def p_opt_signed_0(t):
    'opt_signed : SIGNED'
    t[0] = t[1]

def p_opt_signed_1(t):
    'opt_signed : empty'
    t[0] = ''

# Global map variable to hold templates
templateMap = {}

def p_def_template(t):
    'def_template : DEF TEMPLATE ID CODELIT SEMI'
    templateMap[t[3]] = Template(t[4])
    t[0] = GenCode()

# An instruction format definition looks like
# "def format <fmt>(<params>) {{...}};"
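# For example (illustrative; the format name and body are made up):
#       def format Nop() {{
#               decode_block = 'return new Nop(machInst);\n'
#       }};
# The Python code between {{ }} runs once for each instruction defined
# with the format; whatever it assigns to header_output, decoder_output,
# exec_output, and/or decode_block becomes the generated code (see
# Format.defineInst() below).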
def p_def_format(t):
    'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
    (id, params, code) = (t[3], t[5], t[7])
    defFormat(id, params, code, t.lineno(1))
    t[0] = GenCode()

# The formal parameter list for an instruction format is a possibly
# empty list of comma-separated parameters.  Positional (standard,
# non-keyword) parameters must come first, followed by keyword
# parameters, followed by a '*foo' parameter that gets excess
# positional arguments (as in Python).  Each of these three parameter
# categories is optional.
#
# Note that we do not support the '**foo' parameter for collecting
# otherwise undefined keyword args.  Otherwise the parameter list is
# (I believe) identical to what is supported in Python.
#
# The param list generates a flat list of parameter strings: positional
# parameter names, keyword parameters rendered as 'name = <default>',
# and the '*foo' excess-args parameter if present.
def p_param_list_0(t):
    'param_list : positional_param_list COMMA nonpositional_param_list'
    t[0] = t[1] + t[3]

def p_param_list_1(t):
    '''param_list : positional_param_list
                  | nonpositional_param_list'''
    t[0] = t[1]

def p_positional_param_list_0(t):
    'positional_param_list : empty'
    t[0] = []

def p_positional_param_list_1(t):
    'positional_param_list : ID'
    t[0] = [t[1]]

def p_positional_param_list_2(t):
    'positional_param_list : positional_param_list COMMA ID'
    t[0] = t[1] + [t[3]]

def p_nonpositional_param_list_0(t):
    'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
    t[0] = t[1] + t[3]

def p_nonpositional_param_list_1(t):
    '''nonpositional_param_list : keyword_param_list
                                | excess_args_param'''
    t[0] = t[1]

def p_keyword_param_list_0(t):
    'keyword_param_list : keyword_param'
    t[0] = [t[1]]

def p_keyword_param_list_1(t):
    'keyword_param_list : keyword_param_list COMMA keyword_param'
    t[0] = t[1] + [t[3]]

def p_keyword_param(t):
    'keyword_param : ID EQUALS expr'
    t[0] = t[1] + ' = ' + t[3].__repr__()

def p_excess_args_param(t):
    'excess_args_param : ASTERISK ID'
    # Just concatenate them: '*ID'.  Wrap in list to be consistent
    # with positional_param_list and keyword_param_list.
    t[0] = [t[1] + t[2]]

# End of format definition-related rules.
##############

#
# A decode block looks like:
#       decode <field1> [, <field2>]* [default <inst>] { ... }
#
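# For example (illustrative; the format names are made up):
#
#       decode OPCODE default Unknown::unknown() {
#           0x0: decode FUNCTION {
#               0x1: IntOp::add({{ Rc = Ra + Rb; }});
#           }
#           0x2: Trap::syscall();
#       }
#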
def p_decode_block(t):
    'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
    default_defaults = defaultStack.pop()
    codeObj = t[5]
    # use the "default defaults" only if there was no explicit
    # default statement in decode_stmt_list
    if not codeObj.has_decode_default:
        codeObj += default_defaults
    codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
    t[0] = codeObj

# The opt_default statement serves only to push the "default defaults"
# onto defaultStack.  This value will be used by nested decode blocks,
# and used and popped off when the current decode_block is processed
# (in p_decode_block() above).
def p_opt_default_0(t):
    'opt_default : empty'
    # no default specified: reuse the one currently at the top of the stack
    defaultStack.push(defaultStack.top())
    # no meaningful value returned
    t[0] = None

def p_opt_default_1(t):
    'opt_default : DEFAULT inst'
    # push the new default
    codeObj = t[2]
    codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
    defaultStack.push(codeObj)
    # no meaningful value returned
    t[0] = None

def p_decode_stmt_list_0(t):
    'decode_stmt_list : decode_stmt'
    t[0] = t[1]

def p_decode_stmt_list_1(t):
    'decode_stmt_list : decode_stmt decode_stmt_list'
    if (t[1].has_decode_default and t[2].has_decode_default):
        error(t.lineno(1), 'Two default cases in decode block')
    t[0] = t[1] + t[2]

#
# Decode statement rules
#
# There are four types of statements allowed in a decode block:
# 1. Format blocks 'format <foo> { ... }'
# 2. Nested decode blocks
# 3. Instruction definitions.
# 4. C preprocessor directives.


# Preprocessor directives found in a decode statement list are passed
# through to the output, replicated to all of the output code
# streams.  This works well for ifdefs, so we can ifdef out both the
# declarations and the decode cases generated by an instruction
# definition.  Handling them as part of the grammar makes it easy to
# keep them in the right place with respect to the code generated by
# the other statements.
def p_decode_stmt_cpp(t):
    'decode_stmt : CPPDIRECTIVE'
    t[0] = GenCode(t[1], t[1], t[1], t[1])

# A format block 'format <foo> { ... }' sets the default instruction
# format used to handle instruction definitions inside the block.
# This format can be overridden by using an explicit format on the
# instruction definition or with a nested format block.
def p_decode_stmt_format(t):
    'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
    # The format will be pushed on the stack when 'push_format_id' is
    # processed (see below).  Once the parser has recognized the full
    # production (through the right brace), we're done with the format,
    # so now we can pop it.
    formatStack.pop()
    t[0] = t[4]

# This rule exists so we can set the current format (& push the stack)
# when we recognize the format name part of the format block.
def p_push_format_id(t):
    'push_format_id : ID'
    try:
        formatStack.push(formatMap[t[1]])
        t[0] = ('', '// format %s' % t[1])
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])

# Nested decode block: if the value of the current field matches the
# specified constant, do a nested decode on some other field.
def p_decode_stmt_decode(t):
    'decode_stmt : case_label COLON decode_block'
    label = t[1]
    codeObj = t[3]
    # just wrap the decoding code from the block as a case in the
    # outer switch statement.
    codeObj.wrap_decode_block('\n%s:\n' % label)
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj

# Instruction definition (finally!).
def p_decode_stmt_inst(t):
    'decode_stmt : case_label COLON inst SEMI'
    label = t[1]
    codeObj = t[3]
    codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj

# The case label is either a list of one or more constants or 'default'
def p_case_label_0(t):
    'case_label : intlit_list'
    t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))

def p_case_label_1(t):
    'case_label : DEFAULT'
    t[0] = 'default'

#
# The constant list for a decode case label must be non-empty; it may
# contain one or more comma-separated integer literals.
#
def p_intlit_list_0(t):
    'intlit_list : INTLIT'
    t[0] = [t[1]]

def p_intlit_list_1(t):
    'intlit_list : intlit_list COMMA INTLIT'
    t[0] = t[1]
    t[0].append(t[3])

# Define an instruction using the current instruction format (specified
# by an enclosing format block).
#       "<mnemonic>(<args>)"
def p_inst_0(t):
    'inst : ID LPAREN arg_list RPAREN'
    # Pass the ID and arg list to the current format class to deal with.
    currentFormat = formatStack.top()
    codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
    args = ','.join(map(str, t[3]))
    args = re.sub('(?m)^', '//', args)
    args = re.sub('^//', '', args)
    comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
    codeObj.prepend_all(comment)
    t[0] = codeObj

# Define an instruction using an explicitly specified format:
#       "<fmt>::<mnemonic>(<args>)"
def p_inst_1(t):
    'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
    try:
        format = formatMap[t[1]]
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
    codeObj = format.defineInst(t[3], t[5], t.lineno(1))
    comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
    codeObj.prepend_all(comment)
    t[0] = codeObj

# The arg list generates a tuple, where the first element is a list of
# the positional args and the second element is a dict containing the
# keyword args.
def p_arg_list_0(t):
    'arg_list : positional_arg_list COMMA keyword_arg_list'
    t[0] = ( t[1], t[3] )

def p_arg_list_1(t):
    'arg_list : positional_arg_list'
    t[0] = ( t[1], {} )

def p_arg_list_2(t):
    'arg_list : keyword_arg_list'
    t[0] = ( [], t[1] )

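# For example (illustrative), an argument list written as
#       1, 2, dest=Ra
# parses to ([1, 2], {'dest': 'Ra'}), since bare identifiers are treated
# as strings (see the expr rules below).
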
def p_positional_arg_list_0(t):
    'positional_arg_list : empty'
    t[0] = []

def p_positional_arg_list_1(t):
    'positional_arg_list : expr'
    t[0] = [t[1]]

def p_positional_arg_list_2(t):
    'positional_arg_list : positional_arg_list COMMA expr'
    t[0] = t[1] + [t[3]]

def p_keyword_arg_list_0(t):
    'keyword_arg_list : keyword_arg'
    t[0] = t[1]

def p_keyword_arg_list_1(t):
    'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
    t[0] = t[1]
    t[0].update(t[3])

def p_keyword_arg(t):
    'keyword_arg : ID EQUALS expr'
    t[0] = { t[1] : t[3] }

#
# Basic expressions.  These constitute the argument values of
# "function calls" (i.e. instruction definitions in the decode block)
# and default values for formal parameters of format functions.
#
# Right now, these are either strings, integers, or (recursively)
# lists of exprs (using Python square-bracket list syntax).  Note that
# bare identifiers are treated as string constants here (since there
# isn't really a variable namespace to refer to).
#
def p_expr_0(t):
    '''expr : ID
            | INTLIT
            | STRLIT
            | CODELIT'''
    t[0] = t[1]

def p_expr_1(t):
    '''expr : LBRACKET list_expr RBRACKET'''
    t[0] = t[2]

def p_list_expr_0(t):
    'list_expr : expr'
    t[0] = [t[1]]

def p_list_expr_1(t):
    'list_expr : list_expr COMMA expr'
    t[0] = t[1] + [t[3]]

def p_list_expr_2(t):
    'list_expr : empty'
    t[0] = []

#
# Empty production... use in other rules for readability.
#
def p_empty(t):
    'empty :'
    pass

# Parse error handler.  Note that the argument here is the offending
# *token*, not a grammar symbol (hence the need to use t.value)
def p_error(t):
    if t:
        error(t.lineno, "syntax error at '%s'" % t.value)
    else:
        error(0, "unknown syntax error", True)

# END OF GRAMMAR RULES
#
# Now build the parser.
yacc.yacc()


#####################################################################
#
# Support Classes
#
#####################################################################

# Expand template with CPU-specific references into a dictionary with
# an entry for each CPU model name.  The entry key is the model name
# and the corresponding value is the template with the CPU-specific
# refs substituted for that model.
def expand_cpu_symbols_to_dict(template):
    # Protect '%'s that don't go with CPU-specific terms
    t = re.sub(r'%(?!\(CPU_)', '%%', template)
    result = {}
    for cpu in cpu_models:
        result[cpu.name] = t % cpu.strings
    return result

# *If* the template has CPU-specific references, return a single
# string containing a copy of the template for each CPU model with the
# corresponding values substituted in.  If the template has no
# CPU-specific references, it is returned unmodified.
def expand_cpu_symbols_to_string(template):
    if template.find('%(CPU_') != -1:
        return reduce(lambda x,y: x+y,
                      expand_cpu_symbols_to_dict(template).values())
    else:
        return template

# Protect CPU-specific references by doubling the corresponding '%'s
# (in preparation for substituting a different set of references into
# the template).
def protect_cpu_symbols(template):
    return re.sub(r'%(?=\(CPU_)', '%%', template)

###############
# GenCode class
#
# The GenCode class encapsulates generated code destined for various
# output files.  The header_output and decoder_output attributes are
# strings containing code destined for decoder.hh and decoder.cc
# respectively.  The decode_block attribute contains code to be
# incorporated in the decode function itself (that will also end up in
# decoder.cc).  The exec_output attribute is a dictionary with a key
# for each CPU model name; the value associated with a particular key
# is the string of code for that CPU model's exec.cc file.  The
# has_decode_default attribute is used in the decode block to allow
# explicit default clauses to override default default clauses.

class GenCode:
    # Constructor.  At this point we substitute out all CPU-specific
    # symbols.  For the exec output, these go into the per-model
    # dictionary.  For all other output types they get collapsed into
    # a single string.
    def __init__(self,
                 header_output = '', decoder_output = '', exec_output = '',
                 decode_block = '', has_decode_default = False):
        self.header_output = expand_cpu_symbols_to_string(header_output)
        self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
        if isinstance(exec_output, dict):
            self.exec_output = exec_output
        elif isinstance(exec_output, str):
            # If the exec_output arg is a single string, we replicate
            # it for each of the CPU models, substituting any
            # %(CPU_foo)s params appropriately.
            self.exec_output = expand_cpu_symbols_to_dict(exec_output)
        self.decode_block = expand_cpu_symbols_to_string(decode_block)
        self.has_decode_default = has_decode_default

    # Override '+' operator: generate a new GenCode object that
    # concatenates all the individual strings in the operands.
    def __add__(self, other):
        exec_output = {}
        for cpu in cpu_models:
            n = cpu.name
            exec_output[n] = self.exec_output[n] + other.exec_output[n]
        return GenCode(self.header_output + other.header_output,
                       self.decoder_output + other.decoder_output,
                       exec_output,
                       self.decode_block + other.decode_block,
                       self.has_decode_default or other.has_decode_default)

    # Prepend a string (typically a comment) to all the strings.
    def prepend_all(self, pre):
        self.header_output = pre + self.header_output
        self.decoder_output = pre + self.decoder_output
        self.decode_block = pre + self.decode_block
        for cpu in cpu_models:
            self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]

    # Wrap the decode block in a pair of strings (e.g., 'case foo:'
    # and 'break;').  Used to build the big nested switch statement.
    def wrap_decode_block(self, pre, post = ''):
        self.decode_block = pre + indent(self.decode_block) + post

805# Format object.
806#
807# A format object encapsulates an instruction format. It must provide
808# a defineInst() method that generates the code for an instruction
809# definition.
810
811exportContextSymbols = ('InstObjParams', 'makeList', 're', 'string')
812
813exportContext = {}
814
815def updateExportContext():
816 exportContext.update(exportDict(*exportContextSymbols))
817 exportContext.update(templateMap)
818
819def exportDict(*symNames):
820 return dict([(s, eval(s)) for s in symNames])
821
822
823class Format:
824 def __init__(self, id, params, code):
825 # constructor: just save away arguments
826 self.id = id
827 self.params = params
828 label = 'def format ' + id
829 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
830 param_list = string.join(params, ", ")
831 f = '''def defInst(_code, _context, %s):
832 my_locals = vars().copy()
833 exec _code in _context, my_locals
834 return my_locals\n''' % param_list
835 c = compile(f, label + ' wrapper', 'exec')
836 exec c
837 self.func = defInst
838
839 def defineInst(self, name, args, lineno):
840 context = {}
841 updateExportContext()
842 context.update(exportContext)
843 context.update({ 'name': name, 'Name': string.capitalize(name) })
844 try:
845 vars = self.func(self.user_code, context, *args[0], **args[1])
846 except Exception, exc:
847 error(lineno, 'error defining "%s": %s.' % (name, exc))
848 for k in vars.keys():
849 if k not in ('header_output', 'decoder_output',
850 'exec_output', 'decode_block'):
851 del vars[k]
852 return GenCode(**vars)
853
854# Special null format to catch an implicit-format instruction
855# definition outside of any format block.
856class NoFormat:
857 def __init__(self):
858 self.defaultInst = ''
859
860 def defineInst(self, name, args, lineno):
861 error(lineno,
862 'instruction definition "%s" with no active format!' % name)
863
864# This dictionary maps format name strings to Format objects.
865formatMap = {}
866
867# Define a new format
868def defFormat(id, params, code, lineno):
869 # make sure we haven't already defined this one
870 if formatMap.get(id, None) != None:
871 error(lineno, 'format %s redefined.' % id)
872 # create new object and store in global map
873 formatMap[id] = Format(id, params, code)
874
875
876##############
877# Stack: a simple stack object. Used for both formats (formatStack)
878# and default cases (defaultStack). Simply wraps a list to give more
879# stack-like syntax and enable initialization with an argument list
880# (as opposed to an argument that's a list).
881
882class Stack(list):
883 def __init__(self, *items):
884 list.__init__(self, items)
885
886 def push(self, item):
887 self.append(item);
888
889 def top(self):
890 return self[-1]
891
892# The global format stack.
893formatStack = Stack(NoFormat())
894
895# The global default case stack.
896defaultStack = Stack( None )
897
898# Global stack that tracks current file and line number.
899# Each element is a tuple (filename, lineno) that records the
900# *current* filename and the line number in the *previous* file where
901# it was included.
902fileNameStack = Stack()
903
904###################
905# Utility functions
906
907#
908# Indent every line in string 's' by two spaces
909# (except preprocessor directives).
910# Used to make nested code blocks look pretty.
911#
912def indent(s):
913 return re.sub(r'(?m)^(?!#)', ' ', s)
914
915#
916# Munge a somewhat arbitrarily formatted piece of Python code
917# (e.g. from a format 'let' block) into something whose indentation
918# will get by the Python parser.
919#
920# The two keys here are that Python will give a syntax error if
921# there's any whitespace at the beginning of the first line, and that
922# all lines at the same lexical nesting level must have identical
923# indentation. Unfortunately the way code literals work, an entire
924# let block tends to have some initial indentation. Rather than
925# trying to figure out what that is and strip it off, we prepend 'if
926# 1:' to make the let code the nested block inside the if (and have
927# the parser automatically deal with the indentation for us).
928#
929# We don't want to do this if (1) the code block is empty or (2) the
930# first line of the block doesn't have any whitespace at the front.
931
932def fixPythonIndentation(s):
933 # get rid of blank lines first
934 s = re.sub(r'(?m)^\s*\n', '', s);
935 if (s != '' and re.match(r'[ \t]', s[0])):
936 s = 'if 1:\n' + s
937 return s
938
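# For example (illustrative):
#       fixPythonIndentation('    x = 1\n    y = 2\n')
# returns
#       'if 1:\n    x = 1\n    y = 2\n'
# which compiles even though the original block starts with whitespace.
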
# Error handler.  Just call exit.  Output formatted to work under
# Emacs compile-mode.  Optional 'print_traceback' arg, if set to True,
# prints a Python stack backtrace too (can be handy when trying to
# debug the parser itself).
def error(lineno, string, print_traceback = False):
    spaces = ""
    for (filename, line) in fileNameStack[0:-1]:
        print spaces + "In file included from " + filename + ":"
        spaces += " "
    # Print a Python stack backtrace if requested.
    if (print_traceback):
        traceback.print_exc()
    if lineno != 0:
        line_str = "%d:" % lineno
    else:
        line_str = ""
    sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))


#####################################################################
#
# Bitfield Operator Support
#
#####################################################################

bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')

bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')

def substBitOps(code):
    # first convert single-bit selectors to two-index form
    # i.e., <n> --> <n:n>
    code = bitOp1ArgRE.sub(r'<\1:\1>', code)
    # simple case: selector applied to ID (name)
    # i.e., foo<a:b> --> bits(foo, a, b)
    code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
    # if selector is applied to expression (ending in ')'),
    # we need to search backward for matching '('
    match = bitOpExprRE.search(code)
    while match:
        exprEnd = match.start()
        here = exprEnd - 1
        nestLevel = 1
        while nestLevel > 0:
            if code[here] == '(':
                nestLevel -= 1
            elif code[here] == ')':
                nestLevel += 1
            here -= 1
            if here < 0:
                sys.exit("Didn't find '('!")
        exprStart = here+1
        newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
                                         match.group(1), match.group(2))
        code = code[:exprStart] + newExpr + code[match.end():]
        match = bitOpExprRE.search(code)
    return code


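# For example (illustrative), substBitOps() rewrites
#       'Rs<7:0> + (Ra + Rb)<3:2>'
# into
#       'bits(Rs, 7, 0) + bits((Ra + Rb), 3, 2)'
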
####################
# Template objects.
#
# Template objects are format strings that allow substitution from
# the attribute spaces of other objects (e.g. InstObjParams instances).

labelRE = re.compile(r'[^%]%\(([^\)]+)\)[sd]')

class Template:
    def __init__(self, t):
        self.template = t

    def subst(self, d):
        myDict = None

        # Protect non-Python-dict substitutions (e.g. if there's a printf
        # in the templated C++ code)
        template = protect_non_subst_percents(self.template)
        # CPU-model-specific substitutions are handled later (in GenCode).
        template = protect_cpu_symbols(template)

        # if we're dealing with an InstObjParams object, we need to be a
        # little more sophisticated.  Otherwise, just do what we've always
        # done
        if isinstance(d, InstObjParams):
            # The instruction wide parameters are already formed, but the
            # parameters which are only function wide still need to be
            # generated.
            perFuncNames = ['op_decl', 'op_src_decl', 'op_dest_decl', \
                            'op_rd', 'op_wb', 'mem_acc_size', 'mem_acc_type']
            compositeCode = ''

            myDict = templateMap.copy()
            myDict.update(d.__dict__)
            # The "operands" and "snippets" attributes of the InstObjParams
            # objects are for internal use and not substitution.
            del myDict['operands']
            del myDict['snippets']

            for name in labelRE.findall(template):
                # Don't try to find a snippet to go with things that will
                # match against attributes of d, or that are other templates,
                # or that we're going to generate later, or that we've already
                # found.
                if not hasattr(d, name) and \
                       not templateMap.has_key(name) and \
                       not myDict.has_key(name) and \
                       name not in perFuncNames:
                    myDict[name] = d.snippets[name]
                    if isinstance(myDict[name], str):
                        myDict[name] = substMungedOpNames(substBitOps(myDict[name]))
                        compositeCode += (" " + myDict[name])
1# Copyright (c) 2003-2005 The Regents of The University of Michigan
2# All rights reserved.
3#
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met: redistributions of source code must retain the above copyright
7# notice, this list of conditions and the following disclaimer;
8# redistributions in binary form must reproduce the above copyright
9# notice, this list of conditions and the following disclaimer in the
10# documentation and/or other materials provided with the distribution;
11# neither the name of the copyright holders nor the names of its
12# contributors may be used to endorse or promote products derived from
13# this software without specific prior written permission.
14#
15# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26#
27# Authors: Steve Reinhardt
28# Korey Sewell
29
30import os
31import sys
32import re
33import string
34import traceback
35# get type names
36from types import *
37
38# Prepend the directory where the PLY lex & yacc modules are found
39# to the search path. Assumes we're compiling in a subdirectory
40# of 'build' in the current tree.
41sys.path[0:0] = [os.environ['M5_PLY']]
42
43import lex
44import yacc
45
46#####################################################################
47#
48# Lexer
49#
50# The PLY lexer module takes two things as input:
51# - A list of token names (the string list 'tokens')
52# - A regular expression describing a match for each token. The
53# regexp for token FOO can be provided in two ways:
54# - as a string variable named t_FOO
55# - as the doc string for a function named t_FOO. In this case,
56# the function is also executed, allowing an action to be
57# associated with each token match.
58#
59#####################################################################
60
61# Reserved words. These are listed separately as they are matched
62# using the same regexp as generic IDs, but distinguished in the
63# t_ID() function. The PLY documentation suggests this approach.
64reserved = (
65 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
66 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
67 'OUTPUT', 'SIGNED', 'TEMPLATE'
68 )
69
70# List of tokens. The lex module requires this.
71tokens = reserved + (
72 # identifier
73 'ID',
74
75 # integer literal
76 'INTLIT',
77
78 # string literal
79 'STRLIT',
80
81 # code literal
82 'CODELIT',
83
84 # ( ) [ ] { } < > , ; : :: *
85 'LPAREN', 'RPAREN',
86 'LBRACKET', 'RBRACKET',
87 'LBRACE', 'RBRACE',
88 'LESS', 'GREATER', 'EQUALS',
89 'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
90 'ASTERISK',
91
92 # C preprocessor directives
93 'CPPDIRECTIVE'
94
95# The following are matched but never returned. commented out to
96# suppress PLY warning
97 # newfile directive
98# 'NEWFILE',
99
100 # endfile directive
101# 'ENDFILE'
102)
103
104# Regular expressions for token matching
105t_LPAREN = r'\('
106t_RPAREN = r'\)'
107t_LBRACKET = r'\['
108t_RBRACKET = r'\]'
109t_LBRACE = r'\{'
110t_RBRACE = r'\}'
111t_LESS = r'\<'
112t_GREATER = r'\>'
113t_EQUALS = r'='
114t_COMMA = r','
115t_SEMI = r';'
116t_COLON = r':'
117t_DBLCOLON = r'::'
118t_ASTERISK = r'\*'
119
120# Identifiers and reserved words
121reserved_map = { }
122for r in reserved:
123 reserved_map[r.lower()] = r
124
125def t_ID(t):
126 r'[A-Za-z_]\w*'
127 t.type = reserved_map.get(t.value,'ID')
128 return t
129
130# Integer literal
131def t_INTLIT(t):
132 r'(0x[\da-fA-F]+)|\d+'
133 try:
134 t.value = int(t.value,0)
135 except ValueError:
136 error(t.lineno, 'Integer value "%s" too large' % t.value)
137 t.value = 0
138 return t
139
140# String literal. Note that these use only single quotes, and
141# can span multiple lines.
142def t_STRLIT(t):
143 r"(?m)'([^'])+'"
144 # strip off quotes
145 t.value = t.value[1:-1]
146 t.lineno += t.value.count('\n')
147 return t
148
149
150# "Code literal"... like a string literal, but delimiters are
151# '{{' and '}}' so they get formatted nicely under emacs c-mode
152def t_CODELIT(t):
153 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
154 # strip off {{ & }}
155 t.value = t.value[2:-2]
156 t.lineno += t.value.count('\n')
157 return t
158
159def t_CPPDIRECTIVE(t):
160 r'^\#[^\#].*\n'
161 t.lineno += t.value.count('\n')
162 return t
163
164def t_NEWFILE(t):
165 r'^\#\#newfile\s+"[\w/.-]*"'
166 fileNameStack.push((t.value[11:-1], t.lineno))
167 t.lineno = 0
168
169def t_ENDFILE(t):
170 r'^\#\#endfile'
171 (old_filename, t.lineno) = fileNameStack.pop()
172
173#
174# The functions t_NEWLINE, t_ignore, and t_error are
175# special for the lex module.
176#
177
178# Newlines
179def t_NEWLINE(t):
180 r'\n+'
181 t.lineno += t.value.count('\n')
182
183# Comments
184def t_comment(t):
185 r'//.*'
186
187# Completely ignored characters
188t_ignore = ' \t\x0c'
189
190# Error handler
191def t_error(t):
192 error(t.lineno, "illegal character '%s'" % t.value[0])
193 t.skip(1)
194
195# Build the lexer
196lex.lex()
197
198#####################################################################
199#
200# Parser
201#
202# Every function whose name starts with 'p_' defines a grammar rule.
203# The rule is encoded in the function's doc string, while the
204# function body provides the action taken when the rule is matched.
205# The argument to each function is a list of the values of the
206# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
207# on the RHS. For tokens, the value is copied from the t.value
208# attribute provided by the lexer. For non-terminals, the value
209# is assigned by the producing rule; i.e., the job of the grammar
210# rule function is to set the value for the non-terminal on the LHS
211# (by assigning to t[0]).
212#####################################################################
213
214# The LHS of the first grammar rule is used as the start symbol
215# (in this case, 'specification'). Note that this rule enforces
216# that there will be exactly one namespace declaration, with 0 or more
217# global defs/decls before and after it. The defs & decls before
218# the namespace decl will be outside the namespace; those after
219# will be inside. The decoder function is always inside the namespace.
220def p_specification(t):
221 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
222 global_code = t[1]
223 isa_name = t[2]
224 namespace = isa_name + "Inst"
225 # wrap the decode block as a function definition
226 t[4].wrap_decode_block('''
227StaticInstPtr
228%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
229{
230 using namespace %(namespace)s;
231''' % vars(), '}')
232 # both the latter output blocks and the decode block are in the namespace
233 namespace_code = t[3] + t[4]
234 # pass it all back to the caller of yacc.parse()
235 t[0] = (isa_name, namespace, global_code, namespace_code)
236
237# ISA name declaration looks like "namespace <foo>;"
238def p_name_decl(t):
239 'name_decl : NAMESPACE ID SEMI'
240 t[0] = t[2]
241
242# 'opt_defs_and_outputs' is a possibly empty sequence of
243# def and/or output statements.
244def p_opt_defs_and_outputs_0(t):
245 'opt_defs_and_outputs : empty'
246 t[0] = GenCode()
247
248def p_opt_defs_and_outputs_1(t):
249 'opt_defs_and_outputs : defs_and_outputs'
250 t[0] = t[1]
251
252def p_defs_and_outputs_0(t):
253 'defs_and_outputs : def_or_output'
254 t[0] = t[1]
255
256def p_defs_and_outputs_1(t):
257 'defs_and_outputs : defs_and_outputs def_or_output'
258 t[0] = t[1] + t[2]
259
260# The list of possible definition/output statements.
261def p_def_or_output(t):
262 '''def_or_output : def_format
263 | def_bitfield
264 | def_template
265 | def_operand_types
266 | def_operands
267 | output_header
268 | output_decoder
269 | output_exec
270 | global_let'''
271 t[0] = t[1]
272
273# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
274# directly to the appropriate output section.
275
276
277# Protect any non-dict-substitution '%'s in a format string
278# (i.e. those not followed by '(')
279def protect_non_subst_percents(s):
280 return re.sub(r'%(?!\()', '%%', s)
281
282# Massage output block by substituting in template definitions and bit
283# operators. We handle '%'s embedded in the string that don't
284# indicate template substitutions (or CPU-specific symbols, which get
285# handled in GenCode) by doubling them first so that the format
286# operation will reduce them back to single '%'s.
287def process_output(s):
288 s = protect_non_subst_percents(s)
289 # protects cpu-specific symbols too
290 s = protect_cpu_symbols(s)
291 return substBitOps(s % templateMap)
292
293def p_output_header(t):
294 'output_header : OUTPUT HEADER CODELIT SEMI'
295 t[0] = GenCode(header_output = process_output(t[3]))
296
297def p_output_decoder(t):
298 'output_decoder : OUTPUT DECODER CODELIT SEMI'
299 t[0] = GenCode(decoder_output = process_output(t[3]))
300
301def p_output_exec(t):
302 'output_exec : OUTPUT EXEC CODELIT SEMI'
303 t[0] = GenCode(exec_output = process_output(t[3]))
304
305# global let blocks 'let {{...}}' (Python code blocks) are executed
306# directly when seen. Note that these execute in a special variable
307# context 'exportContext' to prevent the code from polluting this
308# script's namespace.
309def p_global_let(t):
310 'global_let : LET CODELIT SEMI'
311 updateExportContext()
312 try:
313 exec fixPythonIndentation(t[2]) in exportContext
314 except Exception, exc:
315 error(t.lineno(1),
316 'error: %s in global let block "%s".' % (exc, t[2]))
317 t[0] = GenCode() # contributes nothing to the output C++ file
318
319# Define the mapping from operand type extensions to C++ types and bit
320# widths (stored in operandTypeMap).
321def p_def_operand_types(t):
322 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
323 try:
324 userDict = eval('{' + t[3] + '}')
325 except Exception, exc:
326 error(t.lineno(1),
327 'error: %s in def operand_types block "%s".' % (exc, t[3]))
328 buildOperandTypeMap(userDict, t.lineno(1))
329 t[0] = GenCode() # contributes nothing to the output C++ file
330
331# Define the mapping from operand names to operand classes and other
332# traits. Stored in operandNameMap.
333def p_def_operands(t):
334 'def_operands : DEF OPERANDS CODELIT SEMI'
335 if not globals().has_key('operandTypeMap'):
336 error(t.lineno(1),
337 'error: operand types must be defined before operands')
338 try:
339 userDict = eval('{' + t[3] + '}')
340 except Exception, exc:
341 error(t.lineno(1),
342 'error: %s in def operands block "%s".' % (exc, t[3]))
343 buildOperandNameMap(userDict, t.lineno(1))
344 t[0] = GenCode() # contributes nothing to the output C++ file
345
346# A bitfield definition looks like:
347# 'def [signed] bitfield <ID> [<first>:<last>]'
348# This generates a preprocessor macro in the output file.
349def p_def_bitfield_0(t):
350 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
351 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
352 if (t[2] == 'signed'):
353 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
354 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
355 t[0] = GenCode(header_output = hash_define)
356
357# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
358def p_def_bitfield_1(t):
359 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
360 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
361 if (t[2] == 'signed'):
362 expr = 'sext<%d>(%s)' % (1, expr)
363 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
364 t[0] = GenCode(header_output = hash_define)
365
366def p_opt_signed_0(t):
367 'opt_signed : SIGNED'
368 t[0] = t[1]
369
370def p_opt_signed_1(t):
371 'opt_signed : empty'
372 t[0] = ''
373
374# Global map variable to hold templates
375templateMap = {}
376
377def p_def_template(t):
378 'def_template : DEF TEMPLATE ID CODELIT SEMI'
379 templateMap[t[3]] = Template(t[4])
380 t[0] = GenCode()
381
382# An instruction format definition looks like
383# "def format <fmt>(<params>) {{...}};"
384def p_def_format(t):
385 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
386 (id, params, code) = (t[3], t[5], t[7])
387 defFormat(id, params, code, t.lineno(1))
388 t[0] = GenCode()
389
390# The formal parameter list for an instruction format is a possibly
391# empty list of comma-separated parameters. Positional (standard,
392# non-keyword) parameters must come first, followed by keyword
393# parameters, followed by a '*foo' parameter that gets excess
394# positional arguments (as in Python). Each of these three parameter
395# categories is optional.
396#
397# Note that we do not support the '**foo' parameter for collecting
398# otherwise undefined keyword args. Otherwise the parameter list is
399# (I believe) identical to what is supported in Python.
400#
401# The param list generates a tuple, where the first element is a list of
402# the positional params and the second element is a dict containing the
403# keyword params.
404def p_param_list_0(t):
405 'param_list : positional_param_list COMMA nonpositional_param_list'
406 t[0] = t[1] + t[3]
407
408def p_param_list_1(t):
409 '''param_list : positional_param_list
410 | nonpositional_param_list'''
411 t[0] = t[1]
412
413def p_positional_param_list_0(t):
414 'positional_param_list : empty'
415 t[0] = []
416
417def p_positional_param_list_1(t):
418 'positional_param_list : ID'
419 t[0] = [t[1]]
420
421def p_positional_param_list_2(t):
422 'positional_param_list : positional_param_list COMMA ID'
423 t[0] = t[1] + [t[3]]
424
425def p_nonpositional_param_list_0(t):
426 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
427 t[0] = t[1] + t[3]
428
429def p_nonpositional_param_list_1(t):
430 '''nonpositional_param_list : keyword_param_list
431 | excess_args_param'''
432 t[0] = t[1]
433
434def p_keyword_param_list_0(t):
435 'keyword_param_list : keyword_param'
436 t[0] = [t[1]]
437
438def p_keyword_param_list_1(t):
439 'keyword_param_list : keyword_param_list COMMA keyword_param'
440 t[0] = t[1] + [t[3]]
441
442def p_keyword_param(t):
443 'keyword_param : ID EQUALS expr'
444 t[0] = t[1] + ' = ' + t[3].__repr__()
445
446def p_excess_args_param(t):
447 'excess_args_param : ASTERISK ID'
448 # Just concatenate them: '*ID'. Wrap in list to be consistent
449 # with positional_param_list and keyword_param_list.
450 t[0] = [t[1] + t[2]]
451
452# End of format definition-related rules.
453##############
454
455#
456# A decode block looks like:
457# decode <field1> [, <field2>]* [default <inst>] { ... }
458#
459def p_decode_block(t):
460 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
461 default_defaults = defaultStack.pop()
462 codeObj = t[5]
463 # use the "default defaults" only if there was no explicit
464 # default statement in decode_stmt_list
465 if not codeObj.has_decode_default:
466 codeObj += default_defaults
467 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
468 t[0] = codeObj
469
470# The opt_default statement serves only to push the "default defaults"
471# onto defaultStack. This value will be used by nested decode blocks,
472# and used and popped off when the current decode_block is processed
473# (in p_decode_block() above).
474def p_opt_default_0(t):
475 'opt_default : empty'
476 # no default specified: reuse the one currently at the top of the stack
477 defaultStack.push(defaultStack.top())
478 # no meaningful value returned
479 t[0] = None
480
481def p_opt_default_1(t):
482 'opt_default : DEFAULT inst'
483 # push the new default
484 codeObj = t[2]
485 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
486 defaultStack.push(codeObj)
487 # no meaningful value returned
488 t[0] = None
489
490def p_decode_stmt_list_0(t):
491 'decode_stmt_list : decode_stmt'
492 t[0] = t[1]
493
494def p_decode_stmt_list_1(t):
495 'decode_stmt_list : decode_stmt decode_stmt_list'
496 if (t[1].has_decode_default and t[2].has_decode_default):
497 error(t.lineno(1), 'Two default cases in decode block')
498 t[0] = t[1] + t[2]
499
500#
501# Decode statement rules
502#
503# There are four types of statements allowed in a decode block:
504# 1. Format blocks 'format <foo> { ... }'
505# 2. Nested decode blocks
506# 3. Instruction definitions.
507# 4. C preprocessor directives.
508
509
510# Preprocessor directives found in a decode statement list are passed
511# through to the output, replicated to all of the output code
512# streams. This works well for ifdefs, so we can ifdef out both the
513# declarations and the decode cases generated by an instruction
514# definition. Handling them as part of the grammar makes it easy to
515# keep them in the right place with respect to the code generated by
516# the other statements.
517def p_decode_stmt_cpp(t):
518 'decode_stmt : CPPDIRECTIVE'
519 t[0] = GenCode(t[1], t[1], t[1], t[1])
520
521# A format block 'format <foo> { ... }' sets the default instruction
522# format used to handle instruction definitions inside the block.
523# This format can be overridden by using an explicit format on the
524# instruction definition or with a nested format block.
525def p_decode_stmt_format(t):
526 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
527 # The format will be pushed on the stack when 'push_format_id' is
528 # processed (see below). Once the parser has recognized the full
529 # production (though the right brace), we're done with the format,
530 # so now we can pop it.
531 formatStack.pop()
532 t[0] = t[4]
533
534# This rule exists so we can set the current format (& push the stack)
535# when we recognize the format name part of the format block.
536def p_push_format_id(t):
537 'push_format_id : ID'
538 try:
539 formatStack.push(formatMap[t[1]])
540 t[0] = ('', '// format %s' % t[1])
541 except KeyError:
542 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
543
544# Nested decode block: if the value of the current field matches the
545# specified constant, do a nested decode on some other field.
546def p_decode_stmt_decode(t):
547 'decode_stmt : case_label COLON decode_block'
548 label = t[1]
549 codeObj = t[3]
550 # just wrap the decoding code from the block as a case in the
551 # outer switch statement.
552 codeObj.wrap_decode_block('\n%s:\n' % label)
553 codeObj.has_decode_default = (label == 'default')
554 t[0] = codeObj
555
556# Instruction definition (finally!).
557def p_decode_stmt_inst(t):
558 'decode_stmt : case_label COLON inst SEMI'
559 label = t[1]
560 codeObj = t[3]
561 codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
562 codeObj.has_decode_default = (label == 'default')
563 t[0] = codeObj
564
565# The case label is either a list of one or more constants or 'default'
566def p_case_label_0(t):
567 'case_label : intlit_list'
568 t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))
569
570def p_case_label_1(t):
571 'case_label : DEFAULT'
572 t[0] = 'default'
573
574#
575# The constant list for a decode case label must be non-empty, but may have
576# one or more comma-separated integer literals in it.
577#
578def p_intlit_list_0(t):
579 'intlit_list : INTLIT'
580 t[0] = [t[1]]
581
582def p_intlit_list_1(t):
583 'intlit_list : intlit_list COMMA INTLIT'
584 t[0] = t[1]
585 t[0].append(t[3])
586
587# Define an instruction using the current instruction format (specified
588# by an enclosing format block).
589# "<mnemonic>(<args>)"
590def p_inst_0(t):
591 'inst : ID LPAREN arg_list RPAREN'
592 # Pass the ID and arg list to the current format class to deal with.
593 currentFormat = formatStack.top()
594 codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
595 args = ','.join(map(str, t[3]))
596 args = re.sub('(?m)^', '//', args)
597 args = re.sub('^//', '', args)
598 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
599 codeObj.prepend_all(comment)
600 t[0] = codeObj
601
602# Define an instruction using an explicitly specified format:
603# "<fmt>::<mnemonic>(<args>)"
604def p_inst_1(t):
605 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
606 try:
607 format = formatMap[t[1]]
608 except KeyError:
609 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
610 codeObj = format.defineInst(t[3], t[5], t.lineno(1))
611 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
612 codeObj.prepend_all(comment)
613 t[0] = codeObj
614
615# The arg list generates a tuple, where the first element is a list of
616# the positional args and the second element is a dict containing the
617# keyword args.
618def p_arg_list_0(t):
619 'arg_list : positional_arg_list COMMA keyword_arg_list'
620 t[0] = ( t[1], t[3] )
621
622def p_arg_list_1(t):
623 'arg_list : positional_arg_list'
624 t[0] = ( t[1], {} )
625
626def p_arg_list_2(t):
627 'arg_list : keyword_arg_list'
628 t[0] = ( [], t[1] )
629
630def p_positional_arg_list_0(t):
631 'positional_arg_list : empty'
632 t[0] = []
633
634def p_positional_arg_list_1(t):
635 'positional_arg_list : expr'
636 t[0] = [t[1]]
637
638def p_positional_arg_list_2(t):
639 'positional_arg_list : positional_arg_list COMMA expr'
640 t[0] = t[1] + [t[3]]
641
642def p_keyword_arg_list_0(t):
643 'keyword_arg_list : keyword_arg'
644 t[0] = t[1]
645
646def p_keyword_arg_list_1(t):
647 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
648 t[0] = t[1]
649 t[0].update(t[3])
650
651def p_keyword_arg(t):
652 'keyword_arg : ID EQUALS expr'
653 t[0] = { t[1] : t[3] }
654
655#
656# Basic expressions. These constitute the argument values of
657# "function calls" (i.e. instruction definitions in the decode block)
658# and default values for formal parameters of format functions.
659#
660# Right now, these are either strings, integers, or (recursively)
661# lists of exprs (using Python square-bracket list syntax). Note that
662 # bare identifiers are treated as string constants here (since there
663# isn't really a variable namespace to refer to).
664#
665def p_expr_0(t):
666 '''expr : ID
667 | INTLIT
668 | STRLIT
669 | CODELIT'''
670 t[0] = t[1]
671
672def p_expr_1(t):
673 '''expr : LBRACKET list_expr RBRACKET'''
674 t[0] = t[2]
675
676def p_list_expr_0(t):
677 'list_expr : expr'
678 t[0] = [t[1]]
679
680def p_list_expr_1(t):
681 'list_expr : list_expr COMMA expr'
682 t[0] = t[1] + [t[3]]
683
684def p_list_expr_2(t):
685 'list_expr : empty'
686 t[0] = []
687
688#
689# Empty production... use in other rules for readability.
690#
691def p_empty(t):
692 'empty :'
693 pass
694
695# Parse error handler. Note that the argument here is the offending
696# *token*, not a grammar symbol (hence the need to use t.value)
697def p_error(t):
698 if t:
699 error(t.lineno, "syntax error at '%s'" % t.value)
700 else:
701 error(0, "unknown syntax error", True)
702
703# END OF GRAMMAR RULES
704#
705# Now build the parser.
706yacc.yacc()
707
708
709#####################################################################
710#
711# Support Classes
712#
713#####################################################################
714
715# Expand template with CPU-specific references into a dictionary with
716# an entry for each CPU model name. The entry key is the model name
717# and the corresponding value is the template with the CPU-specific
718# refs substituted for that model.
719def expand_cpu_symbols_to_dict(template):
720 # Protect '%'s that don't go with CPU-specific terms
721 t = re.sub(r'%(?!\(CPU_)', '%%', template)
722 result = {}
723 for cpu in cpu_models:
724 result[cpu.name] = t % cpu.strings
725 return result
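# Sketch of the expansion (model names and the 'CPU_foo' key are hypothetical):
# if cpu_models contains models named 'CPUA' and 'CPUB', and each model's
# strings dict supplies a 'CPU_foo' entry, then
#   expand_cpu_symbols_to_dict('x = %(CPU_foo)s; y = 100%;')
# returns {'CPUA': "x = <CPUA's foo>; y = 100%;", 'CPUB': "x = <CPUB's foo>; y = 100%;"},
# the bare '%' having been protected (doubled) before the substitution.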
726
727# *If* the template has CPU-specific references, return a single
728# string containing a copy of the template for each CPU model with the
729# corresponding values substituted in. If the template has no
730# CPU-specific references, it is returned unmodified.
731def expand_cpu_symbols_to_string(template):
732 if template.find('%(CPU_') != -1:
733 return reduce(lambda x,y: x+y,
734 expand_cpu_symbols_to_dict(template).values())
735 else:
736 return template
737
738# Protect CPU-specific references by doubling the corresponding '%'s
739# (in preparation for substituting a different set of references into
740# the template).
741def protect_cpu_symbols(template):
742 return re.sub(r'%(?=\(CPU_)', '%%', template)
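# e.g., protect_cpu_symbols('%(CPU_foo)s + %(bar)s') returns
# '%%(CPU_foo)s + %(bar)s', so a subsequent '%' substitution can expand
# %(bar)s while leaving the CPU-specific term intact.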
743
744###############
745# GenCode class
746#
747# The GenCode class encapsulates generated code destined for various
748# output files. The header_output and decoder_output attributes are
749# strings containing code destined for decoder.hh and decoder.cc
750# respectively. The decode_block attribute contains code to be
751# incorporated in the decode function itself (that will also end up in
752# decoder.cc). The exec_output attribute is a dictionary with a key
753# for each CPU model name; the value associated with a particular key
754# is the string of code for that CPU model's exec.cc file. The
755# has_decode_default attribute is used in the decode block to allow
756# explicit default clauses to override default default clauses.
757
758class GenCode:
759 # Constructor. At this point we substitute out all CPU-specific
760 # symbols. For the exec output, these go into the per-model
761 # dictionary. For all other output types they get collapsed into
762 # a single string.
763 def __init__(self,
764 header_output = '', decoder_output = '', exec_output = '',
765 decode_block = '', has_decode_default = False):
766 self.header_output = expand_cpu_symbols_to_string(header_output)
767 self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
768 if isinstance(exec_output, dict):
769 self.exec_output = exec_output
770 elif isinstance(exec_output, str):
771 # If the exec_output arg is a single string, we replicate
772             # it for each of the CPU models, substituting any
773 # %(CPU_foo)s params appropriately.
774 self.exec_output = expand_cpu_symbols_to_dict(exec_output)
775 self.decode_block = expand_cpu_symbols_to_string(decode_block)
776 self.has_decode_default = has_decode_default
777
778 # Override '+' operator: generate a new GenCode object that
779 # concatenates all the individual strings in the operands.
780 def __add__(self, other):
781 exec_output = {}
782 for cpu in cpu_models:
783 n = cpu.name
784 exec_output[n] = self.exec_output[n] + other.exec_output[n]
785 return GenCode(self.header_output + other.header_output,
786 self.decoder_output + other.decoder_output,
787 exec_output,
788 self.decode_block + other.decode_block,
789 self.has_decode_default or other.has_decode_default)
790
791 # Prepend a string (typically a comment) to all the strings.
792 def prepend_all(self, pre):
793 self.header_output = pre + self.header_output
794 self.decoder_output = pre + self.decoder_output
795 self.decode_block = pre + self.decode_block
796 for cpu in cpu_models:
797 self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]
798
799 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
800 # and 'break;'). Used to build the big nested switch statement.
801 def wrap_decode_block(self, pre, post = ''):
802 self.decode_block = pre + indent(self.decode_block) + post
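    # For example, p_decode_stmt_inst above calls
    # wrap_decode_block('\ncase 0x1:', 'break;\n') (for a label of 'case 0x1'),
    # turning the block's decode code into one indented case arm of the
    # enclosing switch.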
803
804################
805# Format object.
806#
807# A format object encapsulates an instruction format. It must provide
808# a defineInst() method that generates the code for an instruction
809# definition.
810
811exportContextSymbols = ('InstObjParams', 'makeList', 're', 'string')
812
813exportContext = {}
814
815def updateExportContext():
816 exportContext.update(exportDict(*exportContextSymbols))
817 exportContext.update(templateMap)
818
819def exportDict(*symNames):
820 return dict([(s, eval(s)) for s in symNames])
821
822
823class Format:
824 def __init__(self, id, params, code):
825 # constructor: just save away arguments
826 self.id = id
827 self.params = params
828 label = 'def format ' + id
829 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
830 param_list = string.join(params, ", ")
831 f = '''def defInst(_code, _context, %s):
832 my_locals = vars().copy()
833 exec _code in _context, my_locals
834 return my_locals\n''' % param_list
835 c = compile(f, label + ' wrapper', 'exec')
836 exec c
837 self.func = defInst
838
839 def defineInst(self, name, args, lineno):
840 context = {}
841 updateExportContext()
842 context.update(exportContext)
843 context.update({ 'name': name, 'Name': string.capitalize(name) })
844 try:
845 vars = self.func(self.user_code, context, *args[0], **args[1])
846 except Exception, exc:
847 error(lineno, 'error defining "%s": %s.' % (name, exc))
848 for k in vars.keys():
849 if k not in ('header_output', 'decoder_output',
850 'exec_output', 'decode_block'):
851 del vars[k]
852 return GenCode(**vars)
853
854# Special null format to catch an implicit-format instruction
855# definition outside of any format block.
856class NoFormat:
857 def __init__(self):
858 self.defaultInst = ''
859
860 def defineInst(self, name, args, lineno):
861 error(lineno,
862 'instruction definition "%s" with no active format!' % name)
863
864# This dictionary maps format name strings to Format objects.
865formatMap = {}
866
867# Define a new format
868def defFormat(id, params, code, lineno):
869 # make sure we haven't already defined this one
870 if formatMap.get(id, None) != None:
871 error(lineno, 'format %s redefined.' % id)
872 # create new object and store in global map
873 formatMap[id] = Format(id, params, code)
874
875
876##############
877# Stack: a simple stack object. Used for both formats (formatStack)
878# and default cases (defaultStack). Simply wraps a list to give more
879# stack-like syntax and enable initialization with an argument list
880# (as opposed to an argument that's a list).
881
882class Stack(list):
883 def __init__(self, *items):
884 list.__init__(self, items)
885
886 def push(self, item):
887 self.append(item);
888
889 def top(self):
890 return self[-1]
891
892# The global format stack.
893formatStack = Stack(NoFormat())
894
895# The global default case stack.
896defaultStack = Stack( None )
897
898# Global stack that tracks current file and line number.
899# Each element is a tuple (filename, lineno) that records the
900# *current* filename and the line number in the *previous* file where
901# it was included.
902fileNameStack = Stack()
903
904###################
905# Utility functions
906
907#
908# Indent every line in string 's' by two spaces
909# (except preprocessor directives).
910# Used to make nested code blocks look pretty.
911#
912def indent(s):
913 return re.sub(r'(?m)^(?!#)', ' ', s)
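# e.g., indent('x = 1;\n#if FOO\ny = 2;') indents the two code lines but
# leaves the '#if' preprocessor line at column zero.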
914
915#
916# Munge a somewhat arbitrarily formatted piece of Python code
917# (e.g. from a format 'let' block) into something whose indentation
918# will get by the Python parser.
919#
920# The two keys here are that Python will give a syntax error if
921# there's any whitespace at the beginning of the first line, and that
922# all lines at the same lexical nesting level must have identical
923# indentation. Unfortunately the way code literals work, an entire
924# let block tends to have some initial indentation. Rather than
925# trying to figure out what that is and strip it off, we prepend 'if
926# 1:' to make the let code the nested block inside the if (and have
927# the parser automatically deal with the indentation for us).
928#
929# We don't want to do this if (1) the code block is empty or (2) the
930# first line of the block doesn't have any whitespace at the front.
931
932def fixPythonIndentation(s):
933 # get rid of blank lines first
934 s = re.sub(r'(?m)^\s*\n', '', s);
935 if (s != '' and re.match(r'[ \t]', s[0])):
936 s = 'if 1:\n' + s
937 return s
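# e.g., a code literal arriving as '    x = 5\n    y = x + 1\n' is returned
# as 'if 1:\n    x = 5\n    y = x + 1\n', which compile() accepts.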
938
939# Error handler. Just call exit. Output formatted to work under
940# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
941# prints a Python stack backtrace too (can be handy when trying to
942# debug the parser itself).
943def error(lineno, string, print_traceback = False):
944 spaces = ""
945 for (filename, line) in fileNameStack[0:-1]:
946 print spaces + "In file included from " + filename + ":"
947 spaces += " "
948 # Print a Python stack backtrace if requested.
949 if (print_traceback):
950 traceback.print_exc()
951 if lineno != 0:
952 line_str = "%d:" % lineno
953 else:
954 line_str = ""
955 sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))
956
957
958#####################################################################
959#
960# Bitfield Operator Support
961#
962#####################################################################
963
964bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
965
966bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
967bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
968
969def substBitOps(code):
970 # first convert single-bit selectors to two-index form
971 # i.e., <n> --> <n:n>
972 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
973 # simple case: selector applied to ID (name)
974 # i.e., foo<a:b> --> bits(foo, a, b)
975 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
976 # if selector is applied to expression (ending in ')'),
977 # we need to search backward for matching '('
978 match = bitOpExprRE.search(code)
979 while match:
980 exprEnd = match.start()
981 here = exprEnd - 1
982 nestLevel = 1
983 while nestLevel > 0:
984 if code[here] == '(':
985 nestLevel -= 1
986 elif code[here] == ')':
987 nestLevel += 1
988 here -= 1
989 if here < 0:
990 sys.exit("Didn't find '('!")
991 exprStart = here+1
992 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
993 match.group(1), match.group(2))
994 code = code[:exprStart] + newExpr + code[match.end():]
995 match = bitOpExprRE.search(code)
996 return code
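# Worked example (operand names are arbitrary):
#   substBitOps('Ra<7:0> + (Rb + Rc)<3:2>')
# yields
#   'bits(Ra, 7, 0) + bits((Rb + Rc), 3, 2)'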
997
998
999####################
1000# Template objects.
1001#
1002# Template objects are format strings that allow substitution from
1003# the attribute spaces of other objects (e.g. InstObjParams instances).
1004
1005labelRE = re.compile(r'[^%]%\(([^\)]+)\)[sd]')
1006
1007class Template:
1008 def __init__(self, t):
1009 self.template = t
1010
1011 def subst(self, d):
1012 myDict = None
1013
1014 # Protect non-Python-dict substitutions (e.g. if there's a printf
1015 # in the templated C++ code)
1016 template = protect_non_subst_percents(self.template)
1017 # CPU-model-specific substitutions are handled later (in GenCode).
1018 template = protect_cpu_symbols(template)
1019
1020 # if we're dealing with an InstObjParams object, we need to be a
1021 # little more sophisticated. Otherwise, just do what we've always
1022 # done
1023 if isinstance(d, InstObjParams):
1024 # The instruction wide parameters are already formed, but the
1025 # parameters which are only function wide still need to be
1026 # generated.
1027 perFuncNames = ['op_decl', 'op_src_decl', 'op_dest_decl', \
1028 'op_rd', 'op_wb', 'mem_acc_size', 'mem_acc_type']
1029 compositeCode = ''
1030
1031 myDict = templateMap.copy()
1032 myDict.update(d.__dict__)
1033 # The "operands" and "snippets" attributes of the InstObjParams
1034 # objects are for internal use and not substitution.
1035 del myDict['operands']
1036 del myDict['snippets']
1037
1038 for name in labelRE.findall(template):
1039 # Don't try to find a snippet to go with things that will
1040 # match against attributes of d, or that are other templates,
1041 # or that we're going to generate later, or that we've already
1042 # found.
1043 if not hasattr(d, name) and \
1044 not templateMap.has_key(name) and \
1045 not myDict.has_key(name) and \
1046 name not in perFuncNames:
1047 myDict[name] = d.snippets[name]
1048 if isinstance(myDict[name], str):
1049 myDict[name] = substMungedOpNames(substBitOps(myDict[name]))
1050 compositeCode += (" " + myDict[name])
1051
1052 compositeCode += (" " + template)
1053
1051 operands = SubOperandList(compositeCode, d.operands)
1052
1053 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
1054
1055 is_src = lambda op: op.is_src
1056 is_dest = lambda op: op.is_dest
1057
1058 myDict['op_src_decl'] = \
1059 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1060 myDict['op_dest_decl'] = \
1061 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1062
1063 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
1064 myDict['op_wb'] = operands.concatAttrStrings('op_wb')
1065
1066 if d.operands.memOperand:
1067 myDict['mem_acc_size'] = d.operands.memOperand.mem_acc_size
1068 myDict['mem_acc_type'] = d.operands.memOperand.mem_acc_type
1069
1070 else:
1071 # Start with the template namespace. Make a copy since we're
1072 # going to modify it.
1073 myDict = templateMap.copy()
1074 # if the argument is a dictionary, we just use it.
1075 if isinstance(d, dict):
1076 myDict.update(d)
1077 # if the argument is an object, we use its attribute map.
1078 elif hasattr(d, '__dict__'):
1079 myDict.update(d.__dict__)
1080 else:
1081 raise TypeError, "Template.subst() arg must be or have dictionary"
1082 return template % myDict
1083
1084 # Convert to string. This handles the case when a template with a
1085 # CPU-specific term gets interpolated into another template or into
1086 # an output block.
1087 def __str__(self):
1088 return expand_cpu_symbols_to_string(self.template)
1089
1090#####################################################################
1091#
1092# Code Parser
1093#
1094# The remaining code is the support for automatically extracting
1095# instruction characteristics from pseudocode.
1096#
1097#####################################################################
1098
1099# Force the argument to be a list. Useful for flags, where a caller
1100 # can specify a singleton flag or a list of flags. Also useful for
1101# converting tuples to lists so they can be modified.
1102def makeList(arg):
1103 if isinstance(arg, list):
1104 return arg
1105 elif isinstance(arg, tuple):
1106 return list(arg)
1107 elif not arg:
1108 return []
1109 else:
1110 return [ arg ]
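# e.g., makeList('IsLoad') -> ['IsLoad'], makeList(('a', 'b')) -> ['a', 'b'],
# and makeList(None) -> [].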
1111
1112# Generate operandTypeMap from the user's 'def operand_types'
1113# statement.
1114def buildOperandTypeMap(userDict, lineno):
1115 global operandTypeMap
1116 operandTypeMap = {}
1117     for (ext, (desc, size)) in userDict.iteritems():
             ctype = ''  # reset each iteration so an unrecognized desc is caught below
1118 if desc == 'signed int':
1119 ctype = 'int%d_t' % size
1120 is_signed = 1
1121 elif desc == 'unsigned int':
1122 ctype = 'uint%d_t' % size
1123 is_signed = 0
1124 elif desc == 'float':
1125 is_signed = 1 # shouldn't really matter
1126 if size == 32:
1127 ctype = 'float'
1128 elif size == 64:
1129 ctype = 'double'
1130 if ctype == '':
1131             error(lineno, 'Unrecognized type description "%s" in userDict' % desc)
1132 operandTypeMap[ext] = (size, ctype, is_signed)
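# Illustrative example (the extension name is ISA-specific and hypothetical):
# a userDict entry of 'sw': ('signed int', 32) produces
# operandTypeMap['sw'] = (32, 'int32_t', 1).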
1133
1134#
1135#
1136#
1137# Base class for operand descriptors. An instance of this class (or
1138# actually a class derived from this one) represents a specific
1139 # operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
1140 # derived classes encapsulate the traits of a particular operand type
1141# (e.g., "32-bit integer register").
1142#
1143class Operand(object):
1144 def __init__(self, full_name, ext, is_src, is_dest):
1145 self.full_name = full_name
1146 self.ext = ext
1147 self.is_src = is_src
1148 self.is_dest = is_dest
1149 # The 'effective extension' (eff_ext) is either the actual
1150 # extension, if one was explicitly provided, or the default.
1151 if ext:
1152 self.eff_ext = ext
1153 else:
1154 self.eff_ext = self.dflt_ext
1155
1156 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1157
1158 # note that mem_acc_size is undefined for non-mem operands...
1159 # template must be careful not to use it if it doesn't apply.
1160 if self.isMem():
1161 self.mem_acc_size = self.makeAccSize()
1162 self.mem_acc_type = self.ctype
1163
1164 # Finalize additional fields (primarily code fields). This step
1165 # is done separately since some of these fields may depend on the
1166 # register index enumeration that hasn't been performed yet at the
1167 # time of __init__().
1168 def finalize(self):
1169 self.flags = self.getFlags()
1170 self.constructor = self.makeConstructor()
1171 self.op_decl = self.makeDecl()
1172
1173 if self.is_src:
1174 self.op_rd = self.makeRead()
1175 self.op_src_decl = self.makeDecl()
1176 else:
1177 self.op_rd = ''
1178 self.op_src_decl = ''
1179
1180 if self.is_dest:
1181 self.op_wb = self.makeWrite()
1182 self.op_dest_decl = self.makeDecl()
1183 else:
1184 self.op_wb = ''
1185 self.op_dest_decl = ''
1186
1187 def isMem(self):
1188 return 0
1189
1190 def isReg(self):
1191 return 0
1192
1193 def isFloatReg(self):
1194 return 0
1195
1196 def isIntReg(self):
1197 return 0
1198
1199 def isControlReg(self):
1200 return 0
1201
1202 def getFlags(self):
1203 # note the empty slice '[:]' gives us a copy of self.flags[0]
1204 # instead of a reference to it
1205 my_flags = self.flags[0][:]
1206 if self.is_src:
1207 my_flags += self.flags[1]
1208 if self.is_dest:
1209 my_flags += self.flags[2]
1210 return my_flags
1211
1212 def makeDecl(self):
1213 # Note that initializations in the declarations are solely
1214 # to avoid 'uninitialized variable' errors from the compiler.
1215 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1216
1217class IntRegOperand(Operand):
1218 def isReg(self):
1219 return 1
1220
1221 def isIntReg(self):
1222 return 1
1223
1224 def makeConstructor(self):
1225 c = ''
1226 if self.is_src:
1227 c += '\n\t_srcRegIdx[%d] = %s;' % \
1228 (self.src_reg_idx, self.reg_spec)
1229 if self.is_dest:
1230 c += '\n\t_destRegIdx[%d] = %s;' % \
1231 (self.dest_reg_idx, self.reg_spec)
1232 return c
1233
1234 def makeRead(self):
1235 if (self.ctype == 'float' or self.ctype == 'double'):
1236 error(0, 'Attempt to read integer register as FP')
1237 if (self.size == self.dflt_size):
1238 return '%s = xc->readIntRegOperand(this, %d);\n' % \
1239 (self.base_name, self.src_reg_idx)
1240 elif (self.size > self.dflt_size):
1241 int_reg_val = 'xc->readIntRegOperand(this, %d)' % \
1242 (self.src_reg_idx)
1243 if (self.is_signed):
1244 int_reg_val = 'sext<%d>(%s)' % (self.dflt_size, int_reg_val)
1245 return '%s = %s;\n' % (self.base_name, int_reg_val)
1246 else:
1247 return '%s = bits(xc->readIntRegOperand(this, %d), %d, 0);\n' % \
1248 (self.base_name, self.src_reg_idx, self.size-1)
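    # For example, with base_name 'Ra', src_reg_idx 0, and the default size,
    # makeRead() returns the C++ statement
    #   'Ra = xc->readIntRegOperand(this, 0);\n'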
1249
1250 def makeWrite(self):
1251 if (self.ctype == 'float' or self.ctype == 'double'):
1252 error(0, 'Attempt to write integer register as FP')
1253 if (self.size != self.dflt_size and self.is_signed):
1254 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1255 else:
1256 final_val = self.base_name
1257 wb = '''
1258 {
1259 %s final_val = %s;
1260 xc->setIntRegOperand(this, %d, final_val);\n
1261 if (traceData) { traceData->setData(final_val); }
1262 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1263 return wb
1264
1265class FloatRegOperand(Operand):
1266 def isReg(self):
1267 return 1
1268
1269 def isFloatReg(self):
1270 return 1
1271
1272 def makeConstructor(self):
1273 c = ''
1274 if self.is_src:
1275 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1276 (self.src_reg_idx, self.reg_spec)
1277 if self.is_dest:
1278 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1279 (self.dest_reg_idx, self.reg_spec)
1280 return c
1281
1282 def makeRead(self):
1283 bit_select = 0
1284 width = 0;
1285 if (self.ctype == 'float'):
1286 func = 'readFloatRegOperand'
1287 width = 32;
1288 elif (self.ctype == 'double'):
1289 func = 'readFloatRegOperand'
1290 width = 64;
1291 else:
1292 func = 'readFloatRegOperandBits'
1293 if (self.ctype == 'uint32_t'):
1294 width = 32;
1295 elif (self.ctype == 'uint64_t'):
1296 width = 64;
1297 if (self.size != self.dflt_size):
1298 bit_select = 1
1299 if width:
1300 base = 'xc->%s(this, %d, %d)' % \
1301 (func, self.src_reg_idx, width)
1302 else:
1303 base = 'xc->%s(this, %d)' % \
1304 (func, self.src_reg_idx)
1305 if bit_select:
1306 return '%s = bits(%s, %d, 0);\n' % \
1307 (self.base_name, base, self.size-1)
1308 else:
1309 return '%s = %s;\n' % (self.base_name, base)
1310
1311 def makeWrite(self):
1312 final_val = self.base_name
1313 final_ctype = self.ctype
1314 widthSpecifier = ''
1315 width = 0
1316 if (self.ctype == 'float'):
1317 width = 32
1318 func = 'setFloatRegOperand'
1319 elif (self.ctype == 'double'):
1320 width = 64
1321 func = 'setFloatRegOperand'
1322 elif (self.ctype == 'uint32_t'):
1323 func = 'setFloatRegOperandBits'
1324 width = 32
1325 elif (self.ctype == 'uint64_t'):
1326 func = 'setFloatRegOperandBits'
1327 width = 64
1328 else:
1329 func = 'setFloatRegOperandBits'
1330 final_ctype = 'uint%d_t' % self.dflt_size
1331 if (self.size != self.dflt_size and self.is_signed):
1332 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1333 if width:
1334 widthSpecifier = ', %d' % width
1335 wb = '''
1336 {
1337 %s final_val = %s;
1338 xc->%s(this, %d, final_val%s);\n
1339 if (traceData) { traceData->setData(final_val); }
1340 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1341 widthSpecifier)
1342 return wb
1343
1344class ControlRegOperand(Operand):
1345 def isReg(self):
1346 return 1
1347
1348 def isControlReg(self):
1349 return 1
1350
1351 def makeConstructor(self):
1352 c = ''
1353 if self.is_src:
1354 c += '\n\t_srcRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1355 (self.src_reg_idx, self.reg_spec)
1356 if self.is_dest:
1357 c += '\n\t_destRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1358 (self.dest_reg_idx, self.reg_spec)
1359 return c
1360
1361 def makeRead(self):
1362 bit_select = 0
1363 if (self.ctype == 'float' or self.ctype == 'double'):
1364 error(0, 'Attempt to read control register as FP')
1365 base = 'xc->readMiscRegOperandWithEffect(this, %s)' % self.src_reg_idx
1366 if self.size == self.dflt_size:
1367 return '%s = %s;\n' % (self.base_name, base)
1368 else:
1369 return '%s = bits(%s, %d, 0);\n' % \
1370 (self.base_name, base, self.size-1)
1371
1372 def makeWrite(self):
1373 if (self.ctype == 'float' or self.ctype == 'double'):
1374 error(0, 'Attempt to write control register as FP')
1375 wb = 'xc->setMiscRegOperandWithEffect(this, %s, %s);\n' % \
1376 (self.dest_reg_idx, self.base_name)
1377 wb += 'if (traceData) { traceData->setData(%s); }' % \
1378 self.base_name
1379 return wb
1380
1381class MemOperand(Operand):
1382 def isMem(self):
1383 return 1
1384
1385 def makeConstructor(self):
1386 return ''
1387
1388 def makeDecl(self):
1389 # Note that initializations in the declarations are solely
1390 # to avoid 'uninitialized variable' errors from the compiler.
1391 # Declare memory data variable.
1392 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1393 return c
1394
1395 def makeRead(self):
1396 return ''
1397
1398 def makeWrite(self):
1399 return ''
1400
1401 # Return the memory access size *in bits*, suitable for
1402 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1403 def makeAccSize(self):
1404 return self.size
1405
1406
1407class NPCOperand(Operand):
1408 def makeConstructor(self):
1409 return ''
1410
1411 def makeRead(self):
1412 return '%s = xc->readNextPC();\n' % self.base_name
1413
1414 def makeWrite(self):
1415 return 'xc->setNextPC(%s);\n' % self.base_name
1416
1417class NNPCOperand(Operand):
1418 def makeConstructor(self):
1419 return ''
1420
1421 def makeRead(self):
1422 return '%s = xc->readNextNPC();\n' % self.base_name
1423
1424 def makeWrite(self):
1425 return 'xc->setNextNPC(%s);\n' % self.base_name
1426
1427def buildOperandNameMap(userDict, lineno):
1428 global operandNameMap
1429 operandNameMap = {}
1430 for (op_name, val) in userDict.iteritems():
1431 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1432 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1433 # Canonical flag structure is a triple of lists, where each list
1434 # indicates the set of flags implied by this operand always, when
1435 # used as a source, and when used as a dest, respectively.
1436 # For simplicity this can be initialized using a variety of fairly
1437 # obvious shortcuts; we convert these to canonical form here.
1438 if not flags:
1439 # no flags specified (e.g., 'None')
1440 flags = ( [], [], [] )
1441 elif isinstance(flags, str):
1442 # a single flag: assumed to be unconditional
1443 flags = ( [ flags ], [], [] )
1444 elif isinstance(flags, list):
1445 # a list of flags: also assumed to be unconditional
1446 flags = ( flags, [], [] )
1447 elif isinstance(flags, tuple):
1448 # it's a tuple: it should be a triple,
1449 # but each item could be a single string or a list
1450 (uncond_flags, src_flags, dest_flags) = flags
1451 flags = (makeList(uncond_flags),
1452 makeList(src_flags), makeList(dest_flags))
1453 # Accumulate attributes of new operand class in tmp_dict
1454 tmp_dict = {}
1455 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1456 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1457 tmp_dict[attr] = eval(attr)
1458 tmp_dict['base_name'] = op_name
1459 # New class name will be e.g. "IntReg_Ra"
1460 cls_name = base_cls_name + '_' + op_name
1461 # Evaluate string arg to get class object. Note that the
1462 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1463 # have to append "Operand".
1464 try:
1465 base_cls = eval(base_cls_name + 'Operand')
1466 except NameError:
1467 error(lineno,
1468 'error: unknown operand base class "%s"' % base_cls_name)
1469 # The following statement creates a new class called
1470 # <cls_name> as a subclass of <base_cls> with the attributes
1471 # in tmp_dict, just as if we evaluated a class declaration.
1472 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
1473
1474 # Define operand variables.
1475 operands = userDict.keys()
1476
1477 operandsREString = (r'''
1478 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1479 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1480 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1481 '''
1482 % string.join(operands, '|'))
1483
1484 global operandsRE
1485 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
1486
1487 # Same as operandsREString, but extension is mandatory, and only two
1488 # groups are returned (base and ext, not full name as above).
1489     # Used by substMungedOpNames() to strip the extension, yielding legal
1490     # C++ identifiers.
1490 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1491 % string.join(operands, '|'))
1492
1493 global operandsWithExtRE
1494 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
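# Illustrative example (names are hypothetical): a userDict entry such as
#   'Ra': ('IntReg', 'sw', 'RA', 'IsInteger', 1)
# creates a class IntReg_Ra derived from IntRegOperand, with base_name 'Ra',
# default extension 'sw', register specifier 'RA', canonical flags
# (['IsInteger'], [], []), and sort priority 1.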
1495
1496
1497class OperandList:
1498
1499 # Find all the operands in the given code block. Returns an operand
1500 # descriptor list (instance of class OperandList).
1501 def __init__(self, code):
1502 self.items = []
1503 self.bases = {}
1504 # delete comments so we don't match on reg specifiers inside
1505 code = commentRE.sub('', code)
1506 # search for operands
1507 next_pos = 0
1508 while 1:
1509 match = operandsRE.search(code, next_pos)
1510 if not match:
1511 # no more matches: we're done
1512 break
1513 op = match.groups()
1514 # regexp groups are operand full name, base, and extension
1515 (op_full, op_base, op_ext) = op
1516 # if the token following the operand is an assignment, this is
1517 # a destination (LHS), else it's a source (RHS)
1518 is_dest = (assignRE.match(code, match.end()) != None)
1519 is_src = not is_dest
1520 # see if we've already seen this one
1521 op_desc = self.find_base(op_base)
1522 if op_desc:
1523 if op_desc.ext != op_ext:
1524 error(0, 'Inconsistent extensions for operand %s' % \
1525 op_base)
1526 op_desc.is_src = op_desc.is_src or is_src
1527 op_desc.is_dest = op_desc.is_dest or is_dest
1528 else:
1529 # new operand: create new descriptor
1530 op_desc = operandNameMap[op_base](op_full, op_ext,
1531 is_src, is_dest)
1532 self.append(op_desc)
1533 # start next search after end of current match
1534 next_pos = match.end()
1535 self.sort()
1536 # enumerate source & dest register operands... used in building
1537 # constructor later
1538 self.numSrcRegs = 0
1539 self.numDestRegs = 0
1540 self.numFPDestRegs = 0
1541 self.numIntDestRegs = 0
1542 self.memOperand = None
1543 for op_desc in self.items:
1544 if op_desc.isReg():
1545 if op_desc.is_src:
1546 op_desc.src_reg_idx = self.numSrcRegs
1547 self.numSrcRegs += 1
1548 if op_desc.is_dest:
1549 op_desc.dest_reg_idx = self.numDestRegs
1550 self.numDestRegs += 1
1551 if op_desc.isFloatReg():
1552 self.numFPDestRegs += 1
1553 elif op_desc.isIntReg():
1554 self.numIntDestRegs += 1
1555 elif op_desc.isMem():
1556 if self.memOperand:
1557 error(0, "Code block has more than one memory operand.")
1558 self.memOperand = op_desc
1559 # now make a final pass to finalize op_desc fields that may depend
1560 # on the register enumeration
1561 for op_desc in self.items:
1562 op_desc.finalize()
1563
1564 def __len__(self):
1565 return len(self.items)
1566
1567 def __getitem__(self, index):
1568 return self.items[index]
1569
1570 def append(self, op_desc):
1571 self.items.append(op_desc)
1572 self.bases[op_desc.base_name] = op_desc
1573
1574 def find_base(self, base_name):
1575 # like self.bases[base_name], but returns None if not found
1576 # (rather than raising exception)
1577 return self.bases.get(base_name)
1578
1579 # internal helper function for concat[Some]Attr{Strings|Lists}
1580 def __internalConcatAttrs(self, attr_name, filter, result):
1581 for op_desc in self.items:
1582 if filter(op_desc):
1583 result += getattr(op_desc, attr_name)
1584 return result
1585
1586 # return a single string that is the concatenation of the (string)
1587 # values of the specified attribute for all operands
1588 def concatAttrStrings(self, attr_name):
1589 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1590
1591 # like concatAttrStrings, but only include the values for the operands
1592 # for which the provided filter function returns true
1593 def concatSomeAttrStrings(self, filter, attr_name):
1594 return self.__internalConcatAttrs(attr_name, filter, '')
1595
1596 # return a single list that is the concatenation of the (list)
1597 # values of the specified attribute for all operands
1598 def concatAttrLists(self, attr_name):
1599 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1600
1601 # like concatAttrLists, but only include the values for the operands
1602 # for which the provided filter function returns true
1603 def concatSomeAttrLists(self, filter, attr_name):
1604 return self.__internalConcatAttrs(attr_name, filter, [])
1605
1606 def sort(self):
1607 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1608
1609class SubOperandList(OperandList):
1610
1611 # Find all the operands in the given code block. Returns an operand
1612 # descriptor list (instance of class OperandList).
1613 def __init__(self, code, master_list):
1614 self.items = []
1615 self.bases = {}
1616 # delete comments so we don't match on reg specifiers inside
1617 code = commentRE.sub('', code)
1618 # search for operands
1619 next_pos = 0
1620 while 1:
1621 match = operandsRE.search(code, next_pos)
1622 if not match:
1623 # no more matches: we're done
1624 break
1625 op = match.groups()
1626 # regexp groups are operand full name, base, and extension
1627 (op_full, op_base, op_ext) = op
1628 # find this op in the master list
1629 op_desc = master_list.find_base(op_base)
1630 if not op_desc:
1631 error(0, 'Found operand %s which is not in the master list!' \
1632 ' This is an internal error' % \
1633 op_base)
1634 else:
1635 # See if we've already found this operand
1636 op_desc = self.find_base(op_base)
1637 if not op_desc:
1638 # if not, add a reference to it to this sub list
1639 self.append(master_list.bases[op_base])
1640
1641 # start next search after end of current match
1642 next_pos = match.end()
1643 self.sort()
1644 self.memOperand = None
1645 for op_desc in self.items:
1646 if op_desc.isMem():
1647 if self.memOperand:
1648 error(0, "Code block has more than one memory operand.")
1649 self.memOperand = op_desc
1650
1651# Regular expression object to match C++ comments
1652 # (used when scanning code in OperandList.__init__())
1653commentRE = re.compile(r'//.*\n')
1654
1655# Regular expression object to match assignment statements
1656 # (used when classifying operands in OperandList.__init__())
1657assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
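# e.g., in 'Ra = Rb + Rc;' assignRE matches the ' =' immediately after 'Ra',
# so Ra is classified as a destination, while Rb and Rc (not followed by '=')
# are sources; the (?!=) lookahead keeps '==' comparisons from counting.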
1658
1659# Munge operand names in code string to make legal C++ variable names.
1660# This means getting rid of the type extension if any.
1661# (Will match base_name attribute of Operand object.)
1662def substMungedOpNames(code):
1663 return operandsWithExtRE.sub(r'\1', code)
1664
1665def joinLists(t):
1666 return map(string.join, t)
1667
1668def makeFlagConstructor(flag_list):
1669 if len(flag_list) == 0:
1670 return ''
1671 # filter out repeated flags
1672 flag_list.sort()
1673 i = 1
1674 while i < len(flag_list):
1675 if flag_list[i] == flag_list[i-1]:
1676 del flag_list[i]
1677 else:
1678 i += 1
1679 pre = '\n\tflags['
1680 post = '] = true;'
1681 code = pre + string.join(flag_list, post + pre) + post
1682 return code
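# e.g., makeFlagConstructor(['IsMemRef', 'IsLoad', 'IsLoad']) returns
# '\n\tflags[IsLoad] = true;\n\tflags[IsMemRef] = true;' (sorted, duplicates
# removed).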
1683
1684# Assume all instruction flags are of the form 'IsFoo'
1685instFlagRE = re.compile(r'Is.*')
1686
1687# OpClass constants end in 'Op' except No_OpClass
1688opClassRE = re.compile(r'.*Op|No_OpClass')
1689
1690class InstObjParams:
1691 def __init__(self, mnem, class_name, base_class = '',
1692 snippets = None, opt_args = []):
1693 self.mnemonic = mnem
1694 self.class_name = class_name
1695 self.base_class = base_class
1696 compositeCode = ''
1697 if snippets:
1698 if not isinstance(snippets, dict):
1699 snippets = {'code' : snippets}
1700 for snippet in snippets.values():
1701 if isinstance(snippet, str):
1702 compositeCode += (" " + snippet)
1703 self.snippets = snippets
1704
1705 self.operands = OperandList(compositeCode)
1706 self.constructor = self.operands.concatAttrStrings('constructor')
1707 self.constructor += \
1708 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1709 self.constructor += \
1710 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1711 self.constructor += \
1712 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1713 self.constructor += \
1714 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1715 self.flags = self.operands.concatAttrLists('flags')
1716
1717 # Make a basic guess on the operand class (function unit type).
1718 # These are good enough for most cases, and can be overridden
1719 # later otherwise.
1720 if 'IsStore' in self.flags:
1721 self.op_class = 'MemWriteOp'
1722 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1723 self.op_class = 'MemReadOp'
1724 elif 'IsFloating' in self.flags:
1725 self.op_class = 'FloatAddOp'
1726 else:
1727 self.op_class = 'IntAluOp'
1728
1729 # Optional arguments are assumed to be either StaticInst flags
1730 # or an OpClass value. To avoid having to import a complete
1731 # list of these values to match against, we do it ad-hoc
1732 # with regexps.
1733 for oa in opt_args:
1734 if instFlagRE.match(oa):
1735 self.flags.append(oa)
1736 elif opClassRE.match(oa):
1737 self.op_class = oa
1738 else:
1739 error(0, 'InstObjParams: optional arg "%s" not recognized '
1740 'as StaticInst::Flag or OpClass.' % oa)
1741
1742         # add flag initialization to constructor here to include
1743 # any flags added via opt_args
1744 self.constructor += makeFlagConstructor(self.flags)
1745
1746 # if 'IsFloating' is set, add call to the FP enable check
1747 # function (which should be provided by isa_desc via a declare)
1748 if 'IsFloating' in self.flags:
1749 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1750 else:
1751 self.fp_enable_check = ''
1752
1753#######################
1754#
1755# Output file template
1756#
1757
1758file_template = '''
1759/*
1760 * DO NOT EDIT THIS FILE!!!
1761 *
1762 * It was automatically generated from the ISA description in %(filename)s
1763 */
1764
1765%(includes)s
1766
1767%(global_output)s
1768
1769namespace %(namespace)s {
1770
1771%(namespace_output)s
1772
1773} // namespace %(namespace)s
1774
1775%(decode_function)s
1776'''
1777
1778
1779# Update the output file only if the new contents are different from
1780# the current contents. Minimizes the files that need to be rebuilt
1781# after minor changes.
1782def update_if_needed(file, contents):
1783 update = False
1784 if os.access(file, os.R_OK):
1785 f = open(file, 'r')
1786 old_contents = f.read()
1787 f.close()
1788 if contents != old_contents:
1789 print 'Updating', file
1790 os.remove(file) # in case it's write-protected
1791 update = True
1792 else:
1793 print 'File', file, 'is unchanged'
1794 else:
1795 print 'Generating', file
1796 update = True
1797 if update:
1798 f = open(file, 'w')
1799 f.write(contents)
1800 f.close()
1801
1802# This regular expression matches '##include' directives
1803includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1804 re.MULTILINE)
1805
1806# Function to replace a matched '##include' directive with the
1807# contents of the specified file (with nested ##includes replaced
1808# recursively). 'matchobj' is an re match object (from a match of
1809# includeRE) and 'dirname' is the directory relative to which the file
1810# path should be resolved.
1811def replace_include(matchobj, dirname):
1812 fname = matchobj.group('filename')
1813 full_fname = os.path.normpath(os.path.join(dirname, fname))
1814 contents = '##newfile "%s"\n%s\n##endfile\n' % \
1815 (full_fname, read_and_flatten(full_fname))
1816 return contents
1817
1818# Read a file and recursively flatten nested '##include' files.
1819def read_and_flatten(filename):
1820 current_dir = os.path.dirname(filename)
1821 try:
1822 contents = open(filename).read()
1823 except IOError:
1824 error(0, 'Error including file "%s"' % filename)
1825 fileNameStack.push((filename, 0))
1826 # Find any includes and include them
1827 contents = includeRE.sub(lambda m: replace_include(m, current_dir),
1828 contents)
1829 fileNameStack.pop()
1830 return contents
1831
1832#
1833# Read in and parse the ISA description.
1834#
1835def parse_isa_desc(isa_desc_file, output_dir):
1836 # Read file and (recursively) all included files into a string.
1837 # PLY requires that the input be in a single string so we have to
1838 # do this up front.
1839 isa_desc = read_and_flatten(isa_desc_file)
1840
1841 # Initialize filename stack with outer file.
1842 fileNameStack.push((isa_desc_file, 0))
1843
1844 # Parse it.
1845 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1846
1847 # grab the last three path components of isa_desc_file to put in
1848 # the output
1849 filename = '/'.join(isa_desc_file.split('/')[-3:])
1850
1851 # generate decoder.hh
1852 includes = '#include "base/bitfield.hh" // for bitfield support'
1853 global_output = global_code.header_output
1854 namespace_output = namespace_code.header_output
1855 decode_function = ''
1856 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1857
1858 # generate decoder.cc
1859 includes = '#include "decoder.hh"'
1860 global_output = global_code.decoder_output
1861 namespace_output = namespace_code.decoder_output
1862 # namespace_output += namespace_code.decode_block
1863 decode_function = namespace_code.decode_block
1864 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1865
1866 # generate per-cpu exec files
1867 for cpu in cpu_models:
1868 includes = '#include "decoder.hh"\n'
1869 includes += cpu.includes
1870 global_output = global_code.exec_output[cpu.name]
1871 namespace_output = namespace_code.exec_output[cpu.name]
1872 decode_function = ''
1873 update_if_needed(output_dir + '/' + cpu.filename,
1874 file_template % vars())
1875
1876# global list of CpuModel objects (see cpu_models.py)
1877cpu_models = []
1878
1879# Called as script: get args from command line.
1880# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
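# A typical invocation (paths and CPU model names are hypothetical) would be:
#   python isa_parser.py build/cpu_models.py arch/alpha/isa/main.isa \
#       build/decoder AtomicSimpleCPU TimingSimpleCPU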
1881if __name__ == '__main__':
1882 execfile(sys.argv[1]) # read in CpuModel definitions
1883 cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
1884 parse_isa_desc(sys.argv[2], sys.argv[3])
1054 operands = SubOperandList(compositeCode, d.operands)
1055
1056 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
1057
1058 is_src = lambda op: op.is_src
1059 is_dest = lambda op: op.is_dest
1060
1061 myDict['op_src_decl'] = \
1062 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1063 myDict['op_dest_decl'] = \
1064 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1065
1066 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
1067 myDict['op_wb'] = operands.concatAttrStrings('op_wb')
1068
1069 if d.operands.memOperand:
1070 myDict['mem_acc_size'] = d.operands.memOperand.mem_acc_size
1071 myDict['mem_acc_type'] = d.operands.memOperand.mem_acc_type
1072
1073 else:
1074 # Start with the template namespace. Make a copy since we're
1075 # going to modify it.
1076 myDict = templateMap.copy()
1077 # if the argument is a dictionary, we just use it.
1078 if isinstance(d, dict):
1079 myDict.update(d)
1080 # if the argument is an object, we use its attribute map.
1081 elif hasattr(d, '__dict__'):
1082 myDict.update(d.__dict__)
1083 else:
1084 raise TypeError, "Template.subst() arg must be or have dictionary"
1085 return template % myDict
1086
1087 # Convert to string. This handles the case when a template with a
1088 # CPU-specific term gets interpolated into another template or into
1089 # an output block.
1090 def __str__(self):
1091 return expand_cpu_symbols_to_string(self.template)
1092
1093#####################################################################
1094#
1095# Code Parser
1096#
1097# The remaining code is the support for automatically extracting
1098# instruction characteristics from pseudocode.
1099#
1100#####################################################################
1101
1102# Force the argument to be a list. Useful for flags, where a caller
1103# can specify a singleton flag or a list of flags. Also usful for
1104# converting tuples to lists so they can be modified.
1105def makeList(arg):
1106 if isinstance(arg, list):
1107 return arg
1108 elif isinstance(arg, tuple):
1109 return list(arg)
1110 elif not arg:
1111 return []
1112 else:
1113 return [ arg ]
1114
1115# Generate operandTypeMap from the user's 'def operand_types'
1116# statement.
1117def buildOperandTypeMap(userDict, lineno):
1118 global operandTypeMap
1119 operandTypeMap = {}
1120 for (ext, (desc, size)) in userDict.iteritems():
1121 if desc == 'signed int':
1122 ctype = 'int%d_t' % size
1123 is_signed = 1
1124 elif desc == 'unsigned int':
1125 ctype = 'uint%d_t' % size
1126 is_signed = 0
1127 elif desc == 'float':
1128 is_signed = 1 # shouldn't really matter
1129 if size == 32:
1130 ctype = 'float'
1131 elif size == 64:
1132 ctype = 'double'
1133 if ctype == '':
1134 error(lineno, 'Unrecognized type description "%s" in userDict')
1135 operandTypeMap[ext] = (size, ctype, is_signed)
1136
1137#
1138#
1139#
1140# Base class for operand descriptors. An instance of this class (or
1141# actually a class derived from this one) represents a specific
1142# operand for a code block (e.g, "Rc.sq" as a dest). Intermediate
1143# derived classes encapsulates the traits of a particular operand type
1144# (e.g., "32-bit integer register").
1145#
1146class Operand(object):
1147 def __init__(self, full_name, ext, is_src, is_dest):
1148 self.full_name = full_name
1149 self.ext = ext
1150 self.is_src = is_src
1151 self.is_dest = is_dest
1152 # The 'effective extension' (eff_ext) is either the actual
1153 # extension, if one was explicitly provided, or the default.
1154 if ext:
1155 self.eff_ext = ext
1156 else:
1157 self.eff_ext = self.dflt_ext
1158
1159 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1160
1161 # note that mem_acc_size is undefined for non-mem operands...
1162 # template must be careful not to use it if it doesn't apply.
1163 if self.isMem():
1164 self.mem_acc_size = self.makeAccSize()
1165 self.mem_acc_type = self.ctype
1166
1167 # Finalize additional fields (primarily code fields). This step
1168 # is done separately since some of these fields may depend on the
1169 # register index enumeration that hasn't been performed yet at the
1170 # time of __init__().
1171 def finalize(self):
1172 self.flags = self.getFlags()
1173 self.constructor = self.makeConstructor()
1174 self.op_decl = self.makeDecl()
1175
1176 if self.is_src:
1177 self.op_rd = self.makeRead()
1178 self.op_src_decl = self.makeDecl()
1179 else:
1180 self.op_rd = ''
1181 self.op_src_decl = ''
1182
1183 if self.is_dest:
1184 self.op_wb = self.makeWrite()
1185 self.op_dest_decl = self.makeDecl()
1186 else:
1187 self.op_wb = ''
1188 self.op_dest_decl = ''
1189
1190 def isMem(self):
1191 return 0
1192
1193 def isReg(self):
1194 return 0
1195
1196 def isFloatReg(self):
1197 return 0
1198
1199 def isIntReg(self):
1200 return 0
1201
1202 def isControlReg(self):
1203 return 0
1204
1205 def getFlags(self):
1206 # note the empty slice '[:]' gives us a copy of self.flags[0]
1207 # instead of a reference to it
1208 my_flags = self.flags[0][:]
1209 if self.is_src:
1210 my_flags += self.flags[1]
1211 if self.is_dest:
1212 my_flags += self.flags[2]
1213 return my_flags
1214
1215 def makeDecl(self):
1216 # Note that initializations in the declarations are solely
1217 # to avoid 'uninitialized variable' errors from the compiler.
1218 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1219
1220class IntRegOperand(Operand):
1221 def isReg(self):
1222 return 1
1223
1224 def isIntReg(self):
1225 return 1
1226
1227 def makeConstructor(self):
1228 c = ''
1229 if self.is_src:
1230 c += '\n\t_srcRegIdx[%d] = %s;' % \
1231 (self.src_reg_idx, self.reg_spec)
1232 if self.is_dest:
1233 c += '\n\t_destRegIdx[%d] = %s;' % \
1234 (self.dest_reg_idx, self.reg_spec)
1235 return c
1236
1237 def makeRead(self):
1238 if (self.ctype == 'float' or self.ctype == 'double'):
1239 error(0, 'Attempt to read integer register as FP')
1240 if (self.size == self.dflt_size):
1241 return '%s = xc->readIntRegOperand(this, %d);\n' % \
1242 (self.base_name, self.src_reg_idx)
1243 elif (self.size > self.dflt_size):
1244 int_reg_val = 'xc->readIntRegOperand(this, %d)' % \
1245 (self.src_reg_idx)
1246 if (self.is_signed):
1247 int_reg_val = 'sext<%d>(%s)' % (self.dflt_size, int_reg_val)
1248 return '%s = %s;\n' % (self.base_name, int_reg_val)
1249 else:
1250 return '%s = bits(xc->readIntRegOperand(this, %d), %d, 0);\n' % \
1251 (self.base_name, self.src_reg_idx, self.size-1)
1252
1253 def makeWrite(self):
1254 if (self.ctype == 'float' or self.ctype == 'double'):
1255 error(0, 'Attempt to write integer register as FP')
1256 if (self.size != self.dflt_size and self.is_signed):
1257 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1258 else:
1259 final_val = self.base_name
1260 wb = '''
1261 {
1262 %s final_val = %s;
1263 xc->setIntRegOperand(this, %d, final_val);\n
1264 if (traceData) { traceData->setData(final_val); }
1265 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1266 return wb
1267
1268class FloatRegOperand(Operand):
1269 def isReg(self):
1270 return 1
1271
1272 def isFloatReg(self):
1273 return 1
1274
1275 def makeConstructor(self):
1276 c = ''
1277 if self.is_src:
1278 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1279 (self.src_reg_idx, self.reg_spec)
1280 if self.is_dest:
1281 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1282 (self.dest_reg_idx, self.reg_spec)
1283 return c
1284
1285 def makeRead(self):
1286 bit_select = 0
1287 width = 0;
1288 if (self.ctype == 'float'):
1289 func = 'readFloatRegOperand'
1290 width = 32;
1291 elif (self.ctype == 'double'):
1292 func = 'readFloatRegOperand'
1293 width = 64;
1294 else:
1295 func = 'readFloatRegOperandBits'
1296 if (self.ctype == 'uint32_t'):
1297 width = 32;
1298 elif (self.ctype == 'uint64_t'):
1299 width = 64;
1300 if (self.size != self.dflt_size):
1301 bit_select = 1
1302 if width:
1303 base = 'xc->%s(this, %d, %d)' % \
1304 (func, self.src_reg_idx, width)
1305 else:
1306 base = 'xc->%s(this, %d)' % \
1307 (func, self.src_reg_idx)
1308 if bit_select:
1309 return '%s = bits(%s, %d, 0);\n' % \
1310 (self.base_name, base, self.size-1)
1311 else:
1312 return '%s = %s;\n' % (self.base_name, base)
1313
1314 def makeWrite(self):
1315 final_val = self.base_name
1316 final_ctype = self.ctype
1317 widthSpecifier = ''
1318 width = 0
1319 if (self.ctype == 'float'):
1320 width = 32
1321 func = 'setFloatRegOperand'
1322 elif (self.ctype == 'double'):
1323 width = 64
1324 func = 'setFloatRegOperand'
1325 elif (self.ctype == 'uint32_t'):
1326 func = 'setFloatRegOperandBits'
1327 width = 32
1328 elif (self.ctype == 'uint64_t'):
1329 func = 'setFloatRegOperandBits'
1330 width = 64
1331 else:
1332 func = 'setFloatRegOperandBits'
1333 final_ctype = 'uint%d_t' % self.dflt_size
1334 if (self.size != self.dflt_size and self.is_signed):
1335 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1336 if width:
1337 widthSpecifier = ', %d' % width
1338 wb = '''
1339 {
1340 %s final_val = %s;
1341 xc->%s(this, %d, final_val%s);\n
1342 if (traceData) { traceData->setData(final_val); }
1343 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1344 widthSpecifier)
1345 return wb
1346
1347class ControlRegOperand(Operand):
1348 def isReg(self):
1349 return 1
1350
1351 def isControlReg(self):
1352 return 1
1353
1354 def makeConstructor(self):
1355 c = ''
1356 if self.is_src:
1357 c += '\n\t_srcRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1358 (self.src_reg_idx, self.reg_spec)
1359 if self.is_dest:
1360 c += '\n\t_destRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1361 (self.dest_reg_idx, self.reg_spec)
1362 return c
1363
1364 def makeRead(self):
1365 bit_select = 0
1366 if (self.ctype == 'float' or self.ctype == 'double'):
1367 error(0, 'Attempt to read control register as FP')
1368 base = 'xc->readMiscRegOperandWithEffect(this, %s)' % self.src_reg_idx
1369 if self.size == self.dflt_size:
1370 return '%s = %s;\n' % (self.base_name, base)
1371 else:
1372 return '%s = bits(%s, %d, 0);\n' % \
1373 (self.base_name, base, self.size-1)
1374
1375 def makeWrite(self):
1376 if (self.ctype == 'float' or self.ctype == 'double'):
1377 error(0, 'Attempt to write control register as FP')
1378 wb = 'xc->setMiscRegOperandWithEffect(this, %s, %s);\n' % \
1379 (self.dest_reg_idx, self.base_name)
1380 wb += 'if (traceData) { traceData->setData(%s); }' % \
1381 self.base_name
1382 return wb
1383
1384class MemOperand(Operand):
1385 def isMem(self):
1386 return 1
1387
1388 def makeConstructor(self):
1389 return ''
1390
1391 def makeDecl(self):
1392 # Note that initializations in the declarations are solely
1393 # to avoid 'uninitialized variable' errors from the compiler.
1394 # Declare memory data variable.
1395 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1396 return c
1397
1398 def makeRead(self):
1399 return ''
1400
1401 def makeWrite(self):
1402 return ''
1403
1404 # Return the memory access size *in bits*, suitable for
1405 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1406 def makeAccSize(self):
1407 return self.size
1408
1409
1410class NPCOperand(Operand):
1411 def makeConstructor(self):
1412 return ''
1413
1414 def makeRead(self):
1415 return '%s = xc->readNextPC();\n' % self.base_name
1416
1417 def makeWrite(self):
1418 return 'xc->setNextPC(%s);\n' % self.base_name
1419
1420class NNPCOperand(Operand):
1421 def makeConstructor(self):
1422 return ''
1423
1424 def makeRead(self):
1425 return '%s = xc->readNextNPC();\n' % self.base_name
1426
1427 def makeWrite(self):
1428 return 'xc->setNextNPC(%s);\n' % self.base_name
1429
1430def buildOperandNameMap(userDict, lineno):
1431 global operandNameMap
1432 operandNameMap = {}
1433 for (op_name, val) in userDict.iteritems():
1434 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1435 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1436 # Canonical flag structure is a triple of lists, where each list
1437 # indicates the set of flags implied by this operand always, when
1438 # used as a source, and when used as a dest, respectively.
1439 # For simplicity this can be initialized using a variety of fairly
1440 # obvious shortcuts; we convert these to canonical form here.
1441 if not flags:
1442 # no flags specified (e.g., 'None')
1443 flags = ( [], [], [] )
1444 elif isinstance(flags, str):
1445 # a single flag: assumed to be unconditional
1446 flags = ( [ flags ], [], [] )
1447 elif isinstance(flags, list):
1448 # a list of flags: also assumed to be unconditional
1449 flags = ( flags, [], [] )
1450 elif isinstance(flags, tuple):
1451 # it's a tuple: it should be a triple,
1452 # but each item could be a single string or a list
1453 (uncond_flags, src_flags, dest_flags) = flags
1454 flags = (makeList(uncond_flags),
1455 makeList(src_flags), makeList(dest_flags))
1456 # Accumulate attributes of new operand class in tmp_dict
1457 tmp_dict = {}
1458 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1459 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1460 tmp_dict[attr] = eval(attr)
1461 tmp_dict['base_name'] = op_name
1462 # New class name will be e.g. "IntReg_Ra"
1463 cls_name = base_cls_name + '_' + op_name
1464 # Evaluate string arg to get class object. Note that the
1465 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1466 # have to append "Operand".
1467 try:
1468 base_cls = eval(base_cls_name + 'Operand')
1469 except NameError:
1470 error(lineno,
1471 'error: unknown operand base class "%s"' % base_cls_name)
1472 # The following statement creates a new class called
1473 # <cls_name> as a subclass of <base_cls> with the attributes
1474 # in tmp_dict, just as if we evaluated a class declaration.
1475 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
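        # For an operand named 'Ra' with base class name 'IntReg'
        # (hypothetical), the statement above acts like the declaration:
        #   class IntReg_Ra(IntRegOperand):
        #       base_name = 'Ra'
        #       ...       # remaining attributes from tmp_dict
        # and the new class is registered under operandNameMap['Ra'].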
1476
1477 # Define operand variables.
1478 operands = userDict.keys()
1479
1480 operandsREString = (r'''
1481 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1482 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1483 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1484 '''
1485 % string.join(operands, '|'))
1486
1487 global operandsRE
1488 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
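    # For example, if 'Ra' and 'Rb' are defined operands (hypothetical
    # names), then in the code string 'Rb = Ra.sw + 1;' operandsRE yields
    # one match with groups ('Rb', 'Rb', None) and another with groups
    # ('Ra.sw', 'Ra', 'sw').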
1489
1490 # Same as operandsREString, but extension is mandatory, and only two
1491 # groups are returned (base and ext, not full name as above).
1492    # Used to strip the '.ext' suffix to make legal C++ identifiers.
1493 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1494 % string.join(operands, '|'))
1495
1496 global operandsWithExtRE
1497 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1498
1499
1500class OperandList:
1501
1502 # Find all the operands in the given code block. Returns an operand
1503 # descriptor list (instance of class OperandList).
1504 def __init__(self, code):
1505 self.items = []
1506 self.bases = {}
1507 # delete comments so we don't match on reg specifiers inside
1508 code = commentRE.sub('', code)
1509 # search for operands
1510 next_pos = 0
1511 while 1:
1512 match = operandsRE.search(code, next_pos)
1513 if not match:
1514 # no more matches: we're done
1515 break
1516 op = match.groups()
1517 # regexp groups are operand full name, base, and extension
1518 (op_full, op_base, op_ext) = op
1519 # if the token following the operand is an assignment, this is
1520 # a destination (LHS), else it's a source (RHS)
1521 is_dest = (assignRE.match(code, match.end()) != None)
1522 is_src = not is_dest
1523 # see if we've already seen this one
1524 op_desc = self.find_base(op_base)
1525 if op_desc:
1526 if op_desc.ext != op_ext:
1527 error(0, 'Inconsistent extensions for operand %s' % \
1528 op_base)
1529 op_desc.is_src = op_desc.is_src or is_src
1530 op_desc.is_dest = op_desc.is_dest or is_dest
1531 else:
1532 # new operand: create new descriptor
1533 op_desc = operandNameMap[op_base](op_full, op_ext,
1534 is_src, is_dest)
1535 self.append(op_desc)
1536 # start next search after end of current match
1537 next_pos = match.end()
1538 self.sort()
1539 # enumerate source & dest register operands... used in building
1540 # constructor later
1541 self.numSrcRegs = 0
1542 self.numDestRegs = 0
1543 self.numFPDestRegs = 0
1544 self.numIntDestRegs = 0
1545 self.memOperand = None
1546 for op_desc in self.items:
1547 if op_desc.isReg():
1548 if op_desc.is_src:
1549 op_desc.src_reg_idx = self.numSrcRegs
1550 self.numSrcRegs += 1
1551 if op_desc.is_dest:
1552 op_desc.dest_reg_idx = self.numDestRegs
1553 self.numDestRegs += 1
1554 if op_desc.isFloatReg():
1555 self.numFPDestRegs += 1
1556 elif op_desc.isIntReg():
1557 self.numIntDestRegs += 1
1558 elif op_desc.isMem():
1559 if self.memOperand:
1560 error(0, "Code block has more than one memory operand.")
1561 self.memOperand = op_desc
1562 # now make a final pass to finalize op_desc fields that may depend
1563 # on the register enumeration
1564 for op_desc in self.items:
1565 op_desc.finalize()
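
    # Example of the overall scan (assuming hypothetical integer-register
    # operands 'Ra', 'Rb', 'Rc'): for the code string 'Ra = Rb + Rc;',
    # 'Ra' is classified as a destination and 'Rb'/'Rc' as sources, giving
    # numSrcRegs == 2, numDestRegs == 1 and numIntDestRegs == 1.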
1566
1567 def __len__(self):
1568 return len(self.items)
1569
1570 def __getitem__(self, index):
1571 return self.items[index]
1572
1573 def append(self, op_desc):
1574 self.items.append(op_desc)
1575 self.bases[op_desc.base_name] = op_desc
1576
1577 def find_base(self, base_name):
1578 # like self.bases[base_name], but returns None if not found
1579 # (rather than raising exception)
1580 return self.bases.get(base_name)
1581
1582 # internal helper function for concat[Some]Attr{Strings|Lists}
1583 def __internalConcatAttrs(self, attr_name, filter, result):
1584 for op_desc in self.items:
1585 if filter(op_desc):
1586 result += getattr(op_desc, attr_name)
1587 return result
1588
1589 # return a single string that is the concatenation of the (string)
1590 # values of the specified attribute for all operands
1591 def concatAttrStrings(self, attr_name):
1592 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1593
1594 # like concatAttrStrings, but only include the values for the operands
1595 # for which the provided filter function returns true
1596 def concatSomeAttrStrings(self, filter, attr_name):
1597 return self.__internalConcatAttrs(attr_name, filter, '')
1598
1599 # return a single list that is the concatenation of the (list)
1600 # values of the specified attribute for all operands
1601 def concatAttrLists(self, attr_name):
1602 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1603
1604 # like concatAttrLists, but only include the values for the operands
1605 # for which the provided filter function returns true
1606 def concatSomeAttrLists(self, filter, attr_name):
1607 return self.__internalConcatAttrs(attr_name, filter, [])
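    # For example, the constructor snippets of just the register operands
    # could be gathered with:
    #   self.concatSomeAttrStrings(lambda op: op.isReg(), 'constructor')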
1608
1609 def sort(self):
1610 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1611
1612class SubOperandList(OperandList):
1613
1614    # Find all the operands in the given code block that also appear in
1615    # the given master list.  Returns a SubOperandList of their descriptors.
1616 def __init__(self, code, master_list):
1617 self.items = []
1618 self.bases = {}
1619 # delete comments so we don't match on reg specifiers inside
1620 code = commentRE.sub('', code)
1621 # search for operands
1622 next_pos = 0
1623 while 1:
1624 match = operandsRE.search(code, next_pos)
1625 if not match:
1626 # no more matches: we're done
1627 break
1628 op = match.groups()
1629 # regexp groups are operand full name, base, and extension
1630 (op_full, op_base, op_ext) = op
1631 # find this op in the master list
1632 op_desc = master_list.find_base(op_base)
1633 if not op_desc:
1634 error(0, 'Found operand %s which is not in the master list!' \
1635 ' This is an internal error' % \
1636 op_base)
1637 else:
1638 # See if we've already found this operand
1639 op_desc = self.find_base(op_base)
1640 if not op_desc:
1641 # if not, add a reference to it to this sub list
1642 self.append(master_list.bases[op_base])
1643
1644 # start next search after end of current match
1645 next_pos = match.end()
1646 self.sort()
1647 self.memOperand = None
1648 for op_desc in self.items:
1649 if op_desc.isMem():
1650 if self.memOperand:
1651 error(0, "Code block has more than one memory operand.")
1652 self.memOperand = op_desc
1653
1654# Regular expression object to match C++ comments
1655# (used by the OperandList constructors)
1656commentRE = re.compile(r'//.*\n')
1657
1658# Regular expression object to match assignment statements
1659# (used by the OperandList constructors)
1660assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
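# e.g., in 'Ra = Rb;' the text after 'Ra' matches (so 'Ra' is treated as a
# destination), while in 'if (Ra == Rb)' it does not, thanks to the (?!=)
# negative lookahead.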
1661
1662# Munge operand names in code string to make legal C++ variable names.
1663# This means getting rid of the type extension if any.
1664# (The munged name matches the base_name attribute of the Operand object.)
1665def substMungedOpNames(code):
1666 return operandsWithExtRE.sub(r'\1', code)
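# e.g., for a hypothetical operand 'Ra', substMungedOpNames('Ra.sw = Rb;')
# returns 'Ra = Rb;'.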
1667
1668def joinLists(t):
1669 return map(string.join, t)
1670
1671def makeFlagConstructor(flag_list):
1672 if len(flag_list) == 0:
1673 return ''
1674 # filter out repeated flags
1675 flag_list.sort()
1676 i = 1
1677 while i < len(flag_list):
1678 if flag_list[i] == flag_list[i-1]:
1679 del flag_list[i]
1680 else:
1681 i += 1
1682 pre = '\n\tflags['
1683 post = '] = true;'
1684 code = pre + string.join(flag_list, post + pre) + post
1685 return code
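# For example, makeFlagConstructor(['IsLoad', 'IsFloating', 'IsLoad'])
# returns '\n\tflags[IsFloating] = true;\n\tflags[IsLoad] = true;'
# (duplicates removed, flags sorted).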
1686
1687# Assume all instruction flags are of the form 'IsFoo'
1688instFlagRE = re.compile(r'Is.*')
1689
1690# OpClass constants end in 'Op' except No_OpClass
1691opClassRE = re.compile(r'.*Op|No_OpClass')
1692
1693class InstObjParams:
1694 def __init__(self, mnem, class_name, base_class = '',
1695 snippets = None, opt_args = []):
1696 self.mnemonic = mnem
1697 self.class_name = class_name
1698 self.base_class = base_class
1699 compositeCode = ''
1700 if snippets:
1701 if not isinstance(snippets, dict):
1702 snippets = {'code' : snippets}
1703 for snippet in snippets.values():
1704 if isinstance(snippet, str):
1705 compositeCode += (" " + snippet)
1706 self.snippets = snippets
1707
1708 self.operands = OperandList(compositeCode)
1709 self.constructor = self.operands.concatAttrStrings('constructor')
1710 self.constructor += \
1711 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1712 self.constructor += \
1713 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1714 self.constructor += \
1715 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1716 self.constructor += \
1717 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1718 self.flags = self.operands.concatAttrLists('flags')
1719
1720 # Make a basic guess on the operand class (function unit type).
1721        # These are good enough for most cases, and can be overridden
1722        # later via the optional arguments (opt_args) if necessary.
1723 if 'IsStore' in self.flags:
1724 self.op_class = 'MemWriteOp'
1725 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1726 self.op_class = 'MemReadOp'
1727 elif 'IsFloating' in self.flags:
1728 self.op_class = 'FloatAddOp'
1729 else:
1730 self.op_class = 'IntAluOp'
1731
1732 # Optional arguments are assumed to be either StaticInst flags
1733 # or an OpClass value. To avoid having to import a complete
1734 # list of these values to match against, we do it ad-hoc
1735 # with regexps.
1736 for oa in opt_args:
1737 if instFlagRE.match(oa):
1738 self.flags.append(oa)
1739 elif opClassRE.match(oa):
1740 self.op_class = oa
1741 else:
1742 error(0, 'InstObjParams: optional arg "%s" not recognized '
1743 'as StaticInst::Flag or OpClass.' % oa)
1744
1745        # add flag initialization to the constructor here to include
1746 # any flags added via opt_args
1747 self.constructor += makeFlagConstructor(self.flags)
1748
1749 # if 'IsFloating' is set, add call to the FP enable check
1750 # function (which should be provided by isa_desc via a declare)
1751 if 'IsFloating' in self.flags:
1752 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1753 else:
1754 self.fp_enable_check = ''
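
# A typical use from an instruction format definition might look like the
# following sketch (the mnemonic, class names, code string and optional
# args here are all hypothetical):
#   iop = InstObjParams('add', 'Add', 'IntegerInst',
#                       {'code': 'Rc = Ra + Rb;'}, ['IsInteger'])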
1755
1756#######################
1757#
1758# Output file template
1759#
1760
1761file_template = '''
1762/*
1763 * DO NOT EDIT THIS FILE!!!
1764 *
1765 * It was automatically generated from the ISA description in %(filename)s
1766 */
1767
1768%(includes)s
1769
1770%(global_output)s
1771
1772namespace %(namespace)s {
1773
1774%(namespace_output)s
1775
1776} // namespace %(namespace)s
1777
1778%(decode_function)s
1779'''
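# The %(name)s placeholders above are filled in from local variables via
# 'file_template % vars()' in parse_isa_desc() below.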
1780
1781
1782# Update the output file only if the new contents are different from
1783# the current contents. Minimizes the files that need to be rebuilt
1784# after minor changes.
1785def update_if_needed(file, contents):
1786 update = False
1787 if os.access(file, os.R_OK):
1788 f = open(file, 'r')
1789 old_contents = f.read()
1790 f.close()
1791 if contents != old_contents:
1792 print 'Updating', file
1793 os.remove(file) # in case it's write-protected
1794 update = True
1795 else:
1796 print 'File', file, 'is unchanged'
1797 else:
1798 print 'Generating', file
1799 update = True
1800 if update:
1801 f = open(file, 'w')
1802 f.write(contents)
1803 f.close()
1804
1805# This regular expression matches '##include' directives
1806includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1807 re.MULTILINE)
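# e.g., a directive of the form (file name hypothetical):
#   ##include "decoder.isa"
# is matched here, and replace_include() below splices in that file's
# contents bracketed by ##newfile / ##endfile markers.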
1808
1809# Function to replace a matched '##include' directive with the
1810# contents of the specified file (with nested ##includes replaced
1811# recursively). 'matchobj' is an re match object (from a match of
1812# includeRE) and 'dirname' is the directory relative to which the file
1813# path should be resolved.
1814def replace_include(matchobj, dirname):
1815 fname = matchobj.group('filename')
1816 full_fname = os.path.normpath(os.path.join(dirname, fname))
1817 contents = '##newfile "%s"\n%s\n##endfile\n' % \
1818 (full_fname, read_and_flatten(full_fname))
1819 return contents
1820
1821# Read a file and recursively flatten nested '##include' files.
1822def read_and_flatten(filename):
1823 current_dir = os.path.dirname(filename)
1824 try:
1825 contents = open(filename).read()
1826 except IOError:
1827 error(0, 'Error including file "%s"' % filename)
1828 fileNameStack.push((filename, 0))
1829 # Find any includes and include them
1830 contents = includeRE.sub(lambda m: replace_include(m, current_dir),
1831 contents)
1832 fileNameStack.pop()
1833 return contents
1834
1835#
1836# Read in and parse the ISA description.
1837#
1838def parse_isa_desc(isa_desc_file, output_dir):
1839 # Read file and (recursively) all included files into a string.
1840 # PLY requires that the input be in a single string so we have to
1841 # do this up front.
1842 isa_desc = read_and_flatten(isa_desc_file)
1843
1844 # Initialize filename stack with outer file.
1845 fileNameStack.push((isa_desc_file, 0))
1846
1847 # Parse it.
1848 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1849
1850 # grab the last three path components of isa_desc_file to put in
1851 # the output
1852 filename = '/'.join(isa_desc_file.split('/')[-3:])
1853
1854 # generate decoder.hh
1855 includes = '#include "base/bitfield.hh" // for bitfield support'
1856 global_output = global_code.header_output
1857 namespace_output = namespace_code.header_output
1858 decode_function = ''
1859 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1860
1861 # generate decoder.cc
1862 includes = '#include "decoder.hh"'
1863 global_output = global_code.decoder_output
1864 namespace_output = namespace_code.decoder_output
1865 # namespace_output += namespace_code.decode_block
1866 decode_function = namespace_code.decode_block
1867 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1868
1869 # generate per-cpu exec files
1870 for cpu in cpu_models:
1871 includes = '#include "decoder.hh"\n'
1872 includes += cpu.includes
1873 global_output = global_code.exec_output[cpu.name]
1874 namespace_output = namespace_code.exec_output[cpu.name]
1875 decode_function = ''
1876 update_if_needed(output_dir + '/' + cpu.filename,
1877 file_template % vars())
1878
1879# global list of CpuModel objects (see cpu_models.py)
1880cpu_models = []
1881
1882# Called as script: get args from command line.
1883# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
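# e.g. (all paths and model names hypothetical):
#   python isa_parser.py cpu_models.py arch/isa/main.isa build/arch SimpleCPU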
1884if __name__ == '__main__':
1885 execfile(sys.argv[1]) # read in CpuModel definitions
1886 cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
1887 parse_isa_desc(sys.argv[2], sys.argv[3])