1# Copyright (c) 2003-2005 The Regents of The University of Michigan
2# All rights reserved.
3#
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met: redistributions of source code must retain the above copyright
7# notice, this list of conditions and the following disclaimer;
8# redistributions in binary form must reproduce the above copyright
9# notice, this list of conditions and the following disclaimer in the
10# documentation and/or other materials provided with the distribution;
11# neither the name of the copyright holders nor the names of its
12# contributors may be used to endorse or promote products derived from
13# this software without specific prior written permission.
14#
15# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26#
27# Authors: Steve Reinhardt
28
29import os
30import sys
31import re
32import string
33import traceback
34# get type names
35from types import *
36
37# Prepend the directory where the PLY lex & yacc modules are found
38# to the search path. Assumes we're compiling in a subdirectory
39# of 'build' in the current tree.
40sys.path[0:0] = [os.environ['M5_PLY']]
41
42import lex
43import yacc
44
45#####################################################################
46#
47# Lexer
48#
49# The PLY lexer module takes two things as input:
50# - A list of token names (the string list 'tokens')
51# - A regular expression describing a match for each token. The
52# regexp for token FOO can be provided in two ways:
53# - as a string variable named t_FOO
54# - as the doc string for a function named t_FOO. In this case,
55# the function is also executed, allowing an action to be
56# associated with each token match.
57#
58#####################################################################
59
60# Reserved words. These are listed separately as they are matched
61# using the same regexp as generic IDs, but distinguished in the
62# t_ID() function. The PLY documentation suggests this approach.
63reserved = (
64 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
65 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
66 'OUTPUT', 'SIGNED', 'TEMPLATE'
67 )
68
69# List of tokens. The lex module requires this.
70tokens = reserved + (
71 # identifier
72 'ID',
73
74 # integer literal
75 'INTLIT',
76
77 # string literal
78 'STRLIT',
79
80 # code literal
81 'CODELIT',
82
    # ( ) [ ] { } < > = , ; : :: *
84 'LPAREN', 'RPAREN',
85 'LBRACKET', 'RBRACKET',
86 'LBRACE', 'RBRACE',
87 'LESS', 'GREATER', 'EQUALS',
88 'COMMA', 'SEMI', 'COLON', 'DBLCOLON',
89 'ASTERISK',
90
91 # C preprocessor directives
92 'CPPDIRECTIVE'
93
# The following are matched but never returned; they are commented out
# here to suppress a PLY warning about unused tokens.
96 # newfile directive
97# 'NEWFILE',
98
99 # endfile directive
100# 'ENDFILE'
101)
102
103# Regular expressions for token matching
104t_LPAREN = r'\('
105t_RPAREN = r'\)'
106t_LBRACKET = r'\['
107t_RBRACKET = r'\]'
108t_LBRACE = r'\{'
109t_RBRACE = r'\}'
110t_LESS = r'\<'
111t_GREATER = r'\>'
112t_EQUALS = r'='
113t_COMMA = r','
114t_SEMI = r';'
115t_COLON = r':'
116t_DBLCOLON = r'::'
117t_ASTERISK = r'\*'
118
119# Identifiers and reserved words
120reserved_map = { }
121for r in reserved:
122 reserved_map[r.lower()] = r
123
124def t_ID(t):
125 r'[A-Za-z_]\w*'
126 t.type = reserved_map.get(t.value,'ID')
127 return t
128
129# Integer literal
130def t_INTLIT(t):
131 r'(0x[\da-fA-F]+)|\d+'
132 try:
133 t.value = int(t.value,0)
134 except ValueError:
135 error(t.lineno, 'Integer value "%s" too large' % t.value)
136 t.value = 0
137 return t
138
139# String literal. Note that these use only single quotes, and
140# can span multiple lines.
141def t_STRLIT(t):
142 r"(?m)'([^'])+'"
143 # strip off quotes
144 t.value = t.value[1:-1]
145 t.lineno += t.value.count('\n')
146 return t
147
148
149# "Code literal"... like a string literal, but delimiters are
150# '{{' and '}}' so they get formatted nicely under emacs c-mode
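# For illustration (hypothetical register names): the code literal
#     {{ Rc = Ra + Rb; }}
# is returned as the string ' Rc = Ra + Rb; ' with the '{{' and '}}'
# stripped off.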
151def t_CODELIT(t):
152 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
153 # strip off {{ & }}
154 t.value = t.value[2:-2]
155 t.lineno += t.value.count('\n')
156 return t
157
158def t_CPPDIRECTIVE(t):
159 r'^\#[^\#].*\n'
160 t.lineno += t.value.count('\n')
161 return t
162
163def t_NEWFILE(t):
164 r'^\#\#newfile\s+"[\w/.-]*"'
165 fileNameStack.push((t.value[11:-1], t.lineno))
166 t.lineno = 0
167
168def t_ENDFILE(t):
169 r'^\#\#endfile'
170 (old_filename, t.lineno) = fileNameStack.pop()
171
172#
173# The functions t_NEWLINE, t_ignore, and t_error are
174# special for the lex module.
175#
176
177# Newlines
178def t_NEWLINE(t):
179 r'\n+'
180 t.lineno += t.value.count('\n')
181
182# Comments
183def t_comment(t):
184 r'//.*'
185
186# Completely ignored characters
187t_ignore = ' \t\x0c'
188
189# Error handler
190def t_error(t):
191 error(t.lineno, "illegal character '%s'" % t.value[0])
192 t.skip(1)
193
194# Build the lexer
195lex.lex()
196
197#####################################################################
198#
199# Parser
200#
201# Every function whose name starts with 'p_' defines a grammar rule.
202# The rule is encoded in the function's doc string, while the
203# function body provides the action taken when the rule is matched.
204# The argument to each function is a list of the values of the
205# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
206# on the RHS. For tokens, the value is copied from the t.value
207# attribute provided by the lexer. For non-terminals, the value
208# is assigned by the producing rule; i.e., the job of the grammar
209# rule function is to set the value for the non-terminal on the LHS
210# (by assigning to t[0]).
211#####################################################################
212
213# The LHS of the first grammar rule is used as the start symbol
214# (in this case, 'specification'). Note that this rule enforces
215# that there will be exactly one namespace declaration, with 0 or more
216# global defs/decls before and after it. The defs & decls before
217# the namespace decl will be outside the namespace; those after
218# will be inside. The decoder function is always inside the namespace.
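#
# A minimal sketch of that overall shape (all names hypothetical):
#
#     let {{ ... }};                    // before the namespace decl: global
#     namespace MyISA;
#     output header {{ ... }};          // after it: inside namespace MyISA
#     decode OPCODE default Unknown::unknown() { ... }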
219def p_specification(t):
220 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
221 global_code = t[1]
222 isa_name = t[2]
223 namespace = isa_name + "Inst"
224 # wrap the decode block as a function definition
225 t[4].wrap_decode_block('''
226StaticInstPtr
227%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
228{
229 using namespace %(namespace)s;
230''' % vars(), '}')
231 # both the latter output blocks and the decode block are in the namespace
232 namespace_code = t[3] + t[4]
233 # pass it all back to the caller of yacc.parse()
234 t[0] = (isa_name, namespace, global_code, namespace_code)
235
236# ISA name declaration looks like "namespace <foo>;"
237def p_name_decl(t):
238 'name_decl : NAMESPACE ID SEMI'
239 t[0] = t[2]
240
241# 'opt_defs_and_outputs' is a possibly empty sequence of
242# def and/or output statements.
243def p_opt_defs_and_outputs_0(t):
244 'opt_defs_and_outputs : empty'
245 t[0] = GenCode()
246
247def p_opt_defs_and_outputs_1(t):
248 'opt_defs_and_outputs : defs_and_outputs'
249 t[0] = t[1]
250
251def p_defs_and_outputs_0(t):
252 'defs_and_outputs : def_or_output'
253 t[0] = t[1]
254
255def p_defs_and_outputs_1(t):
256 'defs_and_outputs : defs_and_outputs def_or_output'
257 t[0] = t[1] + t[2]
258
259# The list of possible definition/output statements.
260def p_def_or_output(t):
261 '''def_or_output : def_format
262 | def_bitfield
263 | def_template
264 | def_operand_types
265 | def_operands
266 | output_header
267 | output_decoder
268 | output_exec
269 | global_let'''
270 t[0] = t[1]
271
272# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
273# directly to the appropriate output section.
274
275
276# Protect any non-dict-substitution '%'s in a format string
277# (i.e. those not followed by '(')
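# For example, 'printf("50%")' becomes 'printf("50%%")', while a dict
# substitution such as '%(foo)s' is left untouched.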
278def protect_non_subst_percents(s):
279 return re.sub(r'%(?!\()', '%%', s)
280
281# Massage output block by substituting in template definitions and bit
282# operators. We handle '%'s embedded in the string that don't
283# indicate template substitutions (or CPU-specific symbols, which get
284# handled in GenCode) by doubling them first so that the format
285# operation will reduce them back to single '%'s.
286def process_output(s):
287 s = protect_non_subst_percents(s)
288 # protects cpu-specific symbols too
289 s = protect_cpu_symbols(s)
290 return substBitOps(s % templateMap)
291
292def p_output_header(t):
293 'output_header : OUTPUT HEADER CODELIT SEMI'
294 t[0] = GenCode(header_output = process_output(t[3]))
295
296def p_output_decoder(t):
297 'output_decoder : OUTPUT DECODER CODELIT SEMI'
298 t[0] = GenCode(decoder_output = process_output(t[3]))
299
300def p_output_exec(t):
301 'output_exec : OUTPUT EXEC CODELIT SEMI'
302 t[0] = GenCode(exec_output = process_output(t[3]))
303
304# global let blocks 'let {{...}}' (Python code blocks) are executed
305# directly when seen. Note that these execute in a special variable
306# context 'exportContext' to prevent the code from polluting this
307# script's namespace.
308def p_global_let(t):
309 'global_let : LET CODELIT SEMI'
310 updateExportContext()
311 try:
312 exec fixPythonIndentation(t[2]) in exportContext
313 except Exception, exc:
314 error(t.lineno(1),
315 'error: %s in global let block "%s".' % (exc, t[2]))
316 t[0] = GenCode() # contributes nothing to the output C++ file
317
318# Define the mapping from operand type extensions to C++ types and bit
319# widths (stored in operandTypeMap).
320def p_def_operand_types(t):
321 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
322 try:
323 userDict = eval('{' + t[3] + '}')
324 except Exception, exc:
325 error(t.lineno(1),
326 'error: %s in def operand_types block "%s".' % (exc, t[3]))
327 buildOperandTypeMap(userDict, t.lineno(1))
328 t[0] = GenCode() # contributes nothing to the output C++ file
329
330# Define the mapping from operand names to operand classes and other
331# traits. Stored in operandNameMap.
332def p_def_operands(t):
333 'def_operands : DEF OPERANDS CODELIT SEMI'
334 if not globals().has_key('operandTypeMap'):
335 error(t.lineno(1),
336 'error: operand types must be defined before operands')
337 try:
338 userDict = eval('{' + t[3] + '}')
339 except Exception, exc:
340 error(t.lineno(1),
341 'error: %s in def operands block "%s".' % (exc, t[3]))
342 buildOperandNameMap(userDict, t.lineno(1))
343 t[0] = GenCode() # contributes nothing to the output C++ file
344
345# A bitfield definition looks like:
# 'def [signed] bitfield <ID> <first:last>'
347# This generates a preprocessor macro in the output file.
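# For illustration (hypothetical field name), the definition
#     def bitfield OPCODE <31:26>;
# expands to
#     #undef OPCODE
#     #define OPCODE  bits(machInst, 31, 26)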
348def p_def_bitfield_0(t):
349 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
350 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
351 if (t[2] == 'signed'):
352 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
353 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
354 t[0] = GenCode(header_output = hash_define)
355
# alternate form for single bit: 'def [signed] bitfield <ID> <bit>'
357def p_def_bitfield_1(t):
358 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
359 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
360 if (t[2] == 'signed'):
361 expr = 'sext<%d>(%s)' % (1, expr)
362 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
363 t[0] = GenCode(header_output = hash_define)
364
365def p_opt_signed_0(t):
366 'opt_signed : SIGNED'
367 t[0] = t[1]
368
369def p_opt_signed_1(t):
370 'opt_signed : empty'
371 t[0] = ''
372
373# Global map variable to hold templates
374templateMap = {}
375
376def p_def_template(t):
377 'def_template : DEF TEMPLATE ID CODELIT SEMI'
378 templateMap[t[3]] = Template(t[4])
379 t[0] = GenCode()
380
381# An instruction format definition looks like
382# "def format <fmt>(<params>) {{...}};"
383def p_def_format(t):
384 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
385 (id, params, code) = (t[3], t[5], t[7])
386 defFormat(id, params, code, t.lineno(1))
387 t[0] = GenCode()
388
389# The formal parameter list for an instruction format is a possibly
390# empty list of comma-separated parameters. Positional (standard,
391# non-keyword) parameters must come first, followed by keyword
392# parameters, followed by a '*foo' parameter that gets excess
393# positional arguments (as in Python). Each of these three parameter
394# categories is optional.
395#
396# Note that we do not support the '**foo' parameter for collecting
397# otherwise undefined keyword args. Otherwise the parameter list is
398# (I believe) identical to what is supported in Python.
399#
# The param list is flattened into a single list of parameter strings
# (positional names, 'name = default' keyword params, and an optional
# '*foo' excess-args param); Format.__init__ later joins it with commas
# to form the parameter list of the generated Python function.
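# For example (names purely illustrative), a format might be declared with
# '(code, size = 4)' or '(code, *opt_flags)'.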
403def p_param_list_0(t):
404 'param_list : positional_param_list COMMA nonpositional_param_list'
405 t[0] = t[1] + t[3]
406
407def p_param_list_1(t):
408 '''param_list : positional_param_list
409 | nonpositional_param_list'''
410 t[0] = t[1]
411
412def p_positional_param_list_0(t):
413 'positional_param_list : empty'
414 t[0] = []
415
416def p_positional_param_list_1(t):
417 'positional_param_list : ID'
418 t[0] = [t[1]]
419
420def p_positional_param_list_2(t):
421 'positional_param_list : positional_param_list COMMA ID'
422 t[0] = t[1] + [t[3]]
423
424def p_nonpositional_param_list_0(t):
425 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
426 t[0] = t[1] + t[3]
427
428def p_nonpositional_param_list_1(t):
429 '''nonpositional_param_list : keyword_param_list
430 | excess_args_param'''
431 t[0] = t[1]
432
433def p_keyword_param_list_0(t):
434 'keyword_param_list : keyword_param'
435 t[0] = [t[1]]
436
437def p_keyword_param_list_1(t):
438 'keyword_param_list : keyword_param_list COMMA keyword_param'
439 t[0] = t[1] + [t[3]]
440
441def p_keyword_param(t):
442 'keyword_param : ID EQUALS expr'
    t[0] = t[1] + ' = ' + repr(t[3])
444
445def p_excess_args_param(t):
446 'excess_args_param : ASTERISK ID'
447 # Just concatenate them: '*ID'. Wrap in list to be consistent
448 # with positional_param_list and keyword_param_list.
449 t[0] = [t[1] + t[2]]
450
451# End of format definition-related rules.
452##############
453
454#
455# A decode block looks like:
#     decode <field> [default <inst>] { ... }
457#
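# For illustration (field, format, and mnemonic names are hypothetical):
#
#     decode OPCODE default Unknown::unknown() {
#         format IntOp {
#             0x01: add({{ Rc = Ra + Rb; }});
#             0x02: decode FUNC {          // nested decode on another field
#                 0x0: sub({{ Rc = Ra - Rb; }});
#             }
#         }
#     }
#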
458def p_decode_block(t):
459 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
460 default_defaults = defaultStack.pop()
461 codeObj = t[5]
462 # use the "default defaults" only if there was no explicit
463 # default statement in decode_stmt_list
464 if not codeObj.has_decode_default:
465 codeObj += default_defaults
466 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
467 t[0] = codeObj
468
469# The opt_default statement serves only to push the "default defaults"
470# onto defaultStack. This value will be used by nested decode blocks,
471# and used and popped off when the current decode_block is processed
472# (in p_decode_block() above).
473def p_opt_default_0(t):
474 'opt_default : empty'
475 # no default specified: reuse the one currently at the top of the stack
476 defaultStack.push(defaultStack.top())
477 # no meaningful value returned
478 t[0] = None
479
480def p_opt_default_1(t):
481 'opt_default : DEFAULT inst'
482 # push the new default
483 codeObj = t[2]
484 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
485 defaultStack.push(codeObj)
486 # no meaningful value returned
487 t[0] = None
488
489def p_decode_stmt_list_0(t):
490 'decode_stmt_list : decode_stmt'
491 t[0] = t[1]
492
493def p_decode_stmt_list_1(t):
494 'decode_stmt_list : decode_stmt decode_stmt_list'
495 if (t[1].has_decode_default and t[2].has_decode_default):
496 error(t.lineno(1), 'Two default cases in decode block')
497 t[0] = t[1] + t[2]
498
499#
500# Decode statement rules
501#
502# There are four types of statements allowed in a decode block:
503# 1. Format blocks 'format <foo> { ... }'
504# 2. Nested decode blocks
505# 3. Instruction definitions.
506# 4. C preprocessor directives.
507
508
509# Preprocessor directives found in a decode statement list are passed
510# through to the output, replicated to all of the output code
511# streams. This works well for ifdefs, so we can ifdef out both the
512# declarations and the decode cases generated by an instruction
513# definition. Handling them as part of the grammar makes it easy to
514# keep them in the right place with respect to the code generated by
515# the other statements.
516def p_decode_stmt_cpp(t):
517 'decode_stmt : CPPDIRECTIVE'
518 t[0] = GenCode(t[1], t[1], t[1], t[1])
519
520# A format block 'format <foo> { ... }' sets the default instruction
521# format used to handle instruction definitions inside the block.
522# This format can be overridden by using an explicit format on the
523# instruction definition or with a nested format block.
524def p_decode_stmt_format(t):
525 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
526 # The format will be pushed on the stack when 'push_format_id' is
527 # processed (see below). Once the parser has recognized the full
    # production (through the right brace), we're done with the format,
529 # so now we can pop it.
530 formatStack.pop()
531 t[0] = t[4]
532
533# This rule exists so we can set the current format (& push the stack)
534# when we recognize the format name part of the format block.
535def p_push_format_id(t):
536 'push_format_id : ID'
537 try:
538 formatStack.push(formatMap[t[1]])
539 t[0] = ('', '// format %s' % t[1])
540 except KeyError:
541 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
542
543# Nested decode block: if the value of the current field matches the
544# specified constant, do a nested decode on some other field.
545def p_decode_stmt_decode(t):
546 'decode_stmt : case_label COLON decode_block'
547 label = t[1]
548 codeObj = t[3]
549 # just wrap the decoding code from the block as a case in the
550 # outer switch statement.
551 codeObj.wrap_decode_block('\n%s:\n' % label)
552 codeObj.has_decode_default = (label == 'default')
553 t[0] = codeObj
554
555# Instruction definition (finally!).
556def p_decode_stmt_inst(t):
557 'decode_stmt : case_label COLON inst SEMI'
558 label = t[1]
559 codeObj = t[3]
560 codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
561 codeObj.has_decode_default = (label == 'default')
562 t[0] = codeObj
563
564# The case label is either a list of one or more constants or 'default'
565def p_case_label_0(t):
566 'case_label : intlit_list'
567 t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))
568
569def p_case_label_1(t):
570 'case_label : DEFAULT'
571 t[0] = 'default'
572
573#
# The constant list for a decode case label must be non-empty: it consists
# of one or more comma-separated integer literals.
576#
577def p_intlit_list_0(t):
578 'intlit_list : INTLIT'
579 t[0] = [t[1]]
580
581def p_intlit_list_1(t):
582 'intlit_list : intlit_list COMMA INTLIT'
583 t[0] = t[1]
584 t[0].append(t[3])
585
586# Define an instruction using the current instruction format (specified
587# by an enclosing format block).
588# "<mnemonic>(<args>)"
589def p_inst_0(t):
590 'inst : ID LPAREN arg_list RPAREN'
591 # Pass the ID and arg list to the current format class to deal with.
592 currentFormat = formatStack.top()
593 codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
594 args = ','.join(map(str, t[3]))
595 args = re.sub('(?m)^', '//', args)
596 args = re.sub('^//', '', args)
597 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
598 codeObj.prepend_all(comment)
599 t[0] = codeObj
600
601# Define an instruction using an explicitly specified format:
602# "<fmt>::<mnemonic>(<args>)"
603def p_inst_1(t):
604 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
605 try:
606 format = formatMap[t[1]]
607 except KeyError:
608 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
609 codeObj = format.defineInst(t[3], t[5], t.lineno(1))
610 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
611 codeObj.prepend_all(comment)
612 t[0] = codeObj
613
614# The arg list generates a tuple, where the first element is a list of
615# the positional args and the second element is a dict containing the
616# keyword args.
617def p_arg_list_0(t):
618 'arg_list : positional_arg_list COMMA keyword_arg_list'
619 t[0] = ( t[1], t[3] )
620
621def p_arg_list_1(t):
622 'arg_list : positional_arg_list'
623 t[0] = ( t[1], {} )
624
625def p_arg_list_2(t):
626 'arg_list : keyword_arg_list'
627 t[0] = ( [], t[1] )
628
629def p_positional_arg_list_0(t):
630 'positional_arg_list : empty'
631 t[0] = []
632
633def p_positional_arg_list_1(t):
634 'positional_arg_list : expr'
635 t[0] = [t[1]]
636
637def p_positional_arg_list_2(t):
638 'positional_arg_list : positional_arg_list COMMA expr'
639 t[0] = t[1] + [t[3]]
640
641def p_keyword_arg_list_0(t):
642 'keyword_arg_list : keyword_arg'
643 t[0] = t[1]
644
645def p_keyword_arg_list_1(t):
646 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
647 t[0] = t[1]
648 t[0].update(t[3])
649
650def p_keyword_arg(t):
651 'keyword_arg : ID EQUALS expr'
652 t[0] = { t[1] : t[3] }
653
654#
655# Basic expressions. These constitute the argument values of
656# "function calls" (i.e. instruction definitions in the decode block)
657# and default values for formal parameters of format functions.
658#
659# Right now, these are either strings, integers, or (recursively)
660# lists of exprs (using Python square-bracket list syntax). Note that
# bare identifiers are treated as string constants here (since there
662# isn't really a variable namespace to refer to).
663#
664def p_expr_0(t):
665 '''expr : ID
666 | INTLIT
667 | STRLIT
668 | CODELIT'''
669 t[0] = t[1]
670
671def p_expr_1(t):
672 '''expr : LBRACKET list_expr RBRACKET'''
673 t[0] = t[2]
674
675def p_list_expr_0(t):
676 'list_expr : expr'
677 t[0] = [t[1]]
678
679def p_list_expr_1(t):
680 'list_expr : list_expr COMMA expr'
681 t[0] = t[1] + [t[3]]
682
683def p_list_expr_2(t):
684 'list_expr : empty'
685 t[0] = []
686
687#
688# Empty production... use in other rules for readability.
689#
690def p_empty(t):
691 'empty :'
692 pass
693
694# Parse error handler. Note that the argument here is the offending
695# *token*, not a grammar symbol (hence the need to use t.value)
696def p_error(t):
697 if t:
698 error(t.lineno, "syntax error at '%s'" % t.value)
699 else:
700 error(0, "unknown syntax error", True)
701
702# END OF GRAMMAR RULES
703#
704# Now build the parser.
705yacc.yacc()
706
707
708#####################################################################
709#
710# Support Classes
711#
712#####################################################################
713
714# Expand template with CPU-specific references into a dictionary with
715# an entry for each CPU model name. The entry key is the model name
716# and the corresponding value is the template with the CPU-specific
717# refs substituted for that model.
718def expand_cpu_symbols_to_dict(template):
719 # Protect '%'s that don't go with CPU-specific terms
720 t = re.sub(r'%(?!\(CPU_)', '%%', template)
721 result = {}
722 for cpu in cpu_models:
723 result[cpu.name] = t % cpu.strings
724 return result
725
726# *If* the template has CPU-specific references, return a single
727# string containing a copy of the template for each CPU model with the
728# corresponding values substituted in. If the template has no
729# CPU-specific references, it is returned unmodified.
730def expand_cpu_symbols_to_string(template):
731 if template.find('%(CPU_') != -1:
732 return reduce(lambda x,y: x+y,
733 expand_cpu_symbols_to_dict(template).values())
734 else:
735 return template
736
737# Protect CPU-specific references by doubling the corresponding '%'s
738# (in preparation for substituting a different set of references into
739# the template).
740def protect_cpu_symbols(template):
741 return re.sub(r'%(?=\(CPU_)', '%%', template)
742
743###############
744# GenCode class
745#
746# The GenCode class encapsulates generated code destined for various
747# output files. The header_output and decoder_output attributes are
748# strings containing code destined for decoder.hh and decoder.cc
749# respectively. The decode_block attribute contains code to be
750# incorporated in the decode function itself (that will also end up in
751# decoder.cc). The exec_output attribute is a dictionary with a key
752# for each CPU model name; the value associated with a particular key
753# is the string of code for that CPU model's exec.cc file. The
754# has_decode_default attribute is used in the decode block to allow
755# explicit default clauses to override default default clauses.
756
757class GenCode:
758 # Constructor. At this point we substitute out all CPU-specific
759 # symbols. For the exec output, these go into the per-model
760 # dictionary. For all other output types they get collapsed into
761 # a single string.
762 def __init__(self,
763 header_output = '', decoder_output = '', exec_output = '',
764 decode_block = '', has_decode_default = False):
765 self.header_output = expand_cpu_symbols_to_string(header_output)
766 self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
767 if isinstance(exec_output, dict):
768 self.exec_output = exec_output
769 elif isinstance(exec_output, str):
770 # If the exec_output arg is a single string, we replicate
            # it for each of the CPU models, substituting any
772 # %(CPU_foo)s params appropriately.
773 self.exec_output = expand_cpu_symbols_to_dict(exec_output)
774 self.decode_block = expand_cpu_symbols_to_string(decode_block)
775 self.has_decode_default = has_decode_default
776
777 # Override '+' operator: generate a new GenCode object that
778 # concatenates all the individual strings in the operands.
779 def __add__(self, other):
780 exec_output = {}
781 for cpu in cpu_models:
782 n = cpu.name
783 exec_output[n] = self.exec_output[n] + other.exec_output[n]
784 return GenCode(self.header_output + other.header_output,
785 self.decoder_output + other.decoder_output,
786 exec_output,
787 self.decode_block + other.decode_block,
788 self.has_decode_default or other.has_decode_default)
789
790 # Prepend a string (typically a comment) to all the strings.
791 def prepend_all(self, pre):
792 self.header_output = pre + self.header_output
793 self.decoder_output = pre + self.decoder_output
794 self.decode_block = pre + self.decode_block
795 for cpu in cpu_models:
796 self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]
797
798 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
799 # and 'break;'). Used to build the big nested switch statement.
800 def wrap_decode_block(self, pre, post = ''):
801 self.decode_block = pre + indent(self.decode_block) + post
802
803################
804# Format object.
805#
806# A format object encapsulates an instruction format. It must provide
807# a defineInst() method that generates the code for an instruction
808# definition.
809
810exportContextSymbols = ('InstObjParams', 'CodeBlock',
811 'makeList', 're', 'string')
812
813exportContext = {}
814
815def updateExportContext():
816 exportContext.update(exportDict(*exportContextSymbols))
817 exportContext.update(templateMap)
818
819def exportDict(*symNames):
820 return dict([(s, eval(s)) for s in symNames])
821
822
823class Format:
824 def __init__(self, id, params, code):
825 # constructor: just save away arguments
826 self.id = id
827 self.params = params
828 label = 'def format ' + id
829 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
830 param_list = string.join(params, ", ")
831 f = '''def defInst(_code, _context, %s):
832 my_locals = vars().copy()
833 exec _code in _context, my_locals
834 return my_locals\n''' % param_list
835 c = compile(f, label + ' wrapper', 'exec')
836 exec c
837 self.func = defInst
838
839 def defineInst(self, name, args, lineno):
840 context = {}
841 updateExportContext()
842 context.update(exportContext)
843 context.update({ 'name': name, 'Name': string.capitalize(name) })
844 try:
845 vars = self.func(self.user_code, context, *args[0], **args[1])
846 except Exception, exc:
847 error(lineno, 'error defining "%s": %s.' % (name, exc))
848 for k in vars.keys():
849 if k not in ('header_output', 'decoder_output',
850 'exec_output', 'decode_block'):
851 del vars[k]
852 return GenCode(**vars)
853
854# Special null format to catch an implicit-format instruction
855# definition outside of any format block.
856class NoFormat:
857 def __init__(self):
858 self.defaultInst = ''
859
860 def defineInst(self, name, args, lineno):
861 error(lineno,
862 'instruction definition "%s" with no active format!' % name)
863
864# This dictionary maps format name strings to Format objects.
865formatMap = {}
866
867# Define a new format
868def defFormat(id, params, code, lineno):
869 # make sure we haven't already defined this one
870 if formatMap.get(id, None) != None:
871 error(lineno, 'format %s redefined.' % id)
872 # create new object and store in global map
873 formatMap[id] = Format(id, params, code)
874
875
876##############
877# Stack: a simple stack object. Used for both formats (formatStack)
878# and default cases (defaultStack). Simply wraps a list to give more
879# stack-like syntax and enable initialization with an argument list
880# (as opposed to an argument that's a list).
881
882class Stack(list):
883 def __init__(self, *items):
884 list.__init__(self, items)
885
886 def push(self, item):
887 self.append(item);
888
889 def top(self):
890 return self[-1]
891
892# The global format stack.
893formatStack = Stack(NoFormat())
894
895# The global default case stack.
896defaultStack = Stack( None )
897
898# Global stack that tracks current file and line number.
899# Each element is a tuple (filename, lineno) that records the
900# *current* filename and the line number in the *previous* file where
901# it was included.
902fileNameStack = Stack()
903
904###################
905# Utility functions
906
907#
908# Indent every line in string 's' by two spaces
909# (except preprocessor directives).
910# Used to make nested code blocks look pretty.
911#
912def indent(s):
913 return re.sub(r'(?m)^(?!#)', ' ', s)
914
915#
916# Munge a somewhat arbitrarily formatted piece of Python code
917# (e.g. from a format 'let' block) into something whose indentation
918# will get by the Python parser.
919#
920# The two keys here are that Python will give a syntax error if
921# there's any whitespace at the beginning of the first line, and that
922# all lines at the same lexical nesting level must have identical
923# indentation. Unfortunately the way code literals work, an entire
924# let block tends to have some initial indentation. Rather than
925# trying to figure out what that is and strip it off, we prepend 'if
926# 1:' to make the let code the nested block inside the if (and have
927# the parser automatically deal with the indentation for us).
928#
929# We don't want to do this if (1) the code block is empty or (2) the
930# first line of the block doesn't have any whitespace at the front.
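#
# For illustration, a let body of
#     '    x = 1\n    y = 2\n'
# is rewritten as
#     'if 1:\n    x = 1\n    y = 2\n'
# which the Python parser accepts.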
931
932def fixPythonIndentation(s):
933 # get rid of blank lines first
934 s = re.sub(r'(?m)^\s*\n', '', s);
935 if (s != '' and re.match(r'[ \t]', s[0])):
936 s = 'if 1:\n' + s
937 return s
938
939# Error handler. Just call exit. Output formatted to work under
940# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
941# prints a Python stack backtrace too (can be handy when trying to
942# debug the parser itself).
943def error(lineno, string, print_traceback = False):
944 spaces = ""
945 for (filename, line) in fileNameStack[0:-1]:
946 print spaces + "In file included from " + filename + ":"
947 spaces += " "
948 # Print a Python stack backtrace if requested.
949 if (print_traceback):
950 traceback.print_exc()
951 if lineno != 0:
952 line_str = "%d:" % lineno
953 else:
954 line_str = ""
955 sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))
956
957
958#####################################################################
959#
960# Bitfield Operator Support
961#
962#####################################################################
963
bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*>')
965
966bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
967bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
968
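# For illustration (operand names are hypothetical), substBitOps() rewrites
#     'RA<7:0>'        to  'bits(RA, 7, 0)'
#     'RA<3>'          to  'bits(RA, 3, 3)'
#     '(RA + RB)<0>'   to  'bits((RA + RB), 0, 0)'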
969def substBitOps(code):
970 # first convert single-bit selectors to two-index form
971 # i.e., <n> --> <n:n>
972 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
973 # simple case: selector applied to ID (name)
974 # i.e., foo<a:b> --> bits(foo, a, b)
975 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
976 # if selector is applied to expression (ending in ')'),
977 # we need to search backward for matching '('
978 match = bitOpExprRE.search(code)
979 while match:
980 exprEnd = match.start()
981 here = exprEnd - 1
982 nestLevel = 1
983 while nestLevel > 0:
984 if code[here] == '(':
985 nestLevel -= 1
986 elif code[here] == ')':
987 nestLevel += 1
988 here -= 1
989 if here < 0:
990 sys.exit("Didn't find '('!")
991 exprStart = here+1
992 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
993 match.group(1), match.group(2))
994 code = code[:exprStart] + newExpr + code[match.end():]
995 match = bitOpExprRE.search(code)
996 return code
997
998
999####################
1000# Template objects.
1001#
1002# Template objects are format strings that allow substitution from
1003# the attribute spaces of other objects (e.g. InstObjParams instances).
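#
# For illustration (hypothetical template text):
#     Template('%(class_name)s::execute()').subst(iop)
# formats the template against templateMap updated with iop's attribute
# dictionary (e.g. an InstObjParams instance), so 'class_name' comes from
# iop.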
1004
1005class Template:
1006 def __init__(self, t):
1007 self.template = t
1008
1009 def subst(self, d):
1010 # Start with the template namespace. Make a copy since we're
1011 # going to modify it.
1012 myDict = templateMap.copy()
1013 # if the argument is a dictionary, we just use it.
1014 if isinstance(d, dict):
1015 myDict.update(d)
1016 # if the argument is an object, we use its attribute map.
1017 elif hasattr(d, '__dict__'):
1018 myDict.update(d.__dict__)
1019 else:
            raise TypeError, "Template.subst() arg must be or have a dictionary"
1021 # Protect non-Python-dict substitutions (e.g. if there's a printf
1022 # in the templated C++ code)
1023 template = protect_non_subst_percents(self.template)
1024 # CPU-model-specific substitutions are handled later (in GenCode).
1025 template = protect_cpu_symbols(template)
1026 return template % myDict
1027
1028 # Convert to string. This handles the case when a template with a
1029 # CPU-specific term gets interpolated into another template or into
1030 # an output block.
1031 def __str__(self):
1032 return expand_cpu_symbols_to_string(self.template)
1033
1034#####################################################################
1035#
1036# Code Parser
1037#
1038# The remaining code is the support for automatically extracting
1039# instruction characteristics from pseudocode.
1040#
1041#####################################################################
1042
1043# Force the argument to be a list. Useful for flags, where a caller
# can specify a singleton flag or a list of flags. Also useful for
1045# converting tuples to lists so they can be modified.
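# For example: makeList('IsLoad') returns ['IsLoad'], makeList(None)
# returns [], and makeList(('IsLoad', 'IsPrefetch')) returns
# ['IsLoad', 'IsPrefetch'].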
1046def makeList(arg):
1047 if isinstance(arg, list):
1048 return arg
1049 elif isinstance(arg, tuple):
1050 return list(arg)
1051 elif not arg:
1052 return []
1053 else:
1054 return [ arg ]
1055
1056# Generate operandTypeMap from the user's 'def operand_types'
1057# statement.
1058def buildOperandTypeMap(userDict, lineno):
1059 global operandTypeMap
1060 operandTypeMap = {}
    for (ext, (desc, size)) in userDict.iteritems():
        # Initialize so unrecognized descriptions are caught below.
        ctype = ''
        is_signed = 0
        if desc == 'signed int':
            ctype = 'int%d_t' % size
            is_signed = 1
        elif desc == 'unsigned int':
            ctype = 'uint%d_t' % size
            is_signed = 0
        elif desc == 'float':
            is_signed = 1  # shouldn't really matter
            if size == 32:
                ctype = 'float'
            elif size == 64:
                ctype = 'double'
        if ctype == '':
            error(lineno,
                  'Unrecognized type description "%s" in userDict' % desc)
        operandTypeMap[ext] = (size, ctype, is_signed)
1077
1078#
1079#
1080#
1081# Base class for operand descriptors. An instance of this class (or
1082# actually a class derived from this one) represents a specific
# operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
# derived classes encapsulate the traits of a particular operand type
1085# (e.g., "32-bit integer register").
1086#
1087class Operand(object):
1088 def __init__(self, full_name, ext, is_src, is_dest):
1089 self.full_name = full_name
1090 self.ext = ext
1091 self.is_src = is_src
1092 self.is_dest = is_dest
1093 # The 'effective extension' (eff_ext) is either the actual
1094 # extension, if one was explicitly provided, or the default.
1095 if ext:
1096 self.eff_ext = ext
1097 else:
1098 self.eff_ext = self.dflt_ext
1099
1100 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1101
1102 # note that mem_acc_size is undefined for non-mem operands...
1103 # template must be careful not to use it if it doesn't apply.
1104 if self.isMem():
1105 self.mem_acc_size = self.makeAccSize()
1106 self.mem_acc_type = self.ctype
1107
1108 # Finalize additional fields (primarily code fields). This step
1109 # is done separately since some of these fields may depend on the
1110 # register index enumeration that hasn't been performed yet at the
1111 # time of __init__().
1112 def finalize(self):
1113 self.flags = self.getFlags()
1114 self.constructor = self.makeConstructor()
1115 self.op_decl = self.makeDecl()
1116
1117 if self.is_src:
1118 self.op_rd = self.makeRead()
1119 self.op_src_decl = self.makeDecl()
1120 else:
1121 self.op_rd = ''
1122 self.op_src_decl = ''
1123
1124 if self.is_dest:
1125 self.op_wb = self.makeWrite()
1126 self.op_dest_decl = self.makeDecl()
1127 else:
1128 self.op_wb = ''
1129 self.op_dest_decl = ''
1130
1131 def isMem(self):
1132 return 0
1133
1134 def isReg(self):
1135 return 0
1136
1137 def isFloatReg(self):
1138 return 0
1139
1140 def isIntReg(self):
1141 return 0
1142
1143 def isControlReg(self):
1144 return 0
1145
1146 def getFlags(self):
1147 # note the empty slice '[:]' gives us a copy of self.flags[0]
1148 # instead of a reference to it
1149 my_flags = self.flags[0][:]
1150 if self.is_src:
1151 my_flags += self.flags[1]
1152 if self.is_dest:
1153 my_flags += self.flags[2]
1154 return my_flags
1155
1156 def makeDecl(self):
1157 # Note that initializations in the declarations are solely
1158 # to avoid 'uninitialized variable' errors from the compiler.
1159 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1160
1161class IntRegOperand(Operand):
1162 def isReg(self):
1163 return 1
1164
1165 def isIntReg(self):
1166 return 1
1167
1168 def makeConstructor(self):
1169 c = ''
1170 if self.is_src:
1171 c += '\n\t_srcRegIdx[%d] = %s;' % \
1172 (self.src_reg_idx, self.reg_spec)
1173 if self.is_dest:
1174 c += '\n\t_destRegIdx[%d] = %s;' % \
1175 (self.dest_reg_idx, self.reg_spec)
1176 return c
1177
1178 def makeRead(self):
1179 if (self.ctype == 'float' or self.ctype == 'double'):
1180 error(0, 'Attempt to read integer register as FP')
1181 if (self.size == self.dflt_size):
1182 return '%s = xc->readIntReg(this, %d);\n' % \
1183 (self.base_name, self.src_reg_idx)
1184 else:
1185 return '%s = bits(xc->readIntReg(this, %d), %d, 0);\n' % \
1186 (self.base_name, self.src_reg_idx, self.size-1)
1187
1188 def makeWrite(self):
1189 if (self.ctype == 'float' or self.ctype == 'double'):
1190 error(0, 'Attempt to write integer register as FP')
1191 if (self.size != self.dflt_size and self.is_signed):
1192 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1193 else:
1194 final_val = self.base_name
1195 wb = '''
1196 {
1197 %s final_val = %s;
1198 xc->setIntReg(this, %d, final_val);\n
1199 if (traceData) { traceData->setData(final_val); }
1200 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1201 return wb
1202
1203class FloatRegOperand(Operand):
1204 def isReg(self):
1205 return 1
1206
1207 def isFloatReg(self):
1208 return 1
1209
1210 def makeConstructor(self):
1211 c = ''
1212 if self.is_src:
1213 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1214 (self.src_reg_idx, self.reg_spec)
1215 if self.is_dest:
1216 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1217 (self.dest_reg_idx, self.reg_spec)
1218 return c
1219
1220 def makeRead(self):
1221 bit_select = 0
1222 width = 0;
1223 if (self.ctype == 'float'):
1224 func = 'readFloatReg'
1225 width = 32;
1226 elif (self.ctype == 'double'):
1227 func = 'readFloatReg'
1228 width = 64;
1229 else:
1230 func = 'readFloatRegBits'
1231 if (self.ctype == 'uint32_t'):
1232 width = 32;
1233 elif (self.ctype == 'uint64_t'):
1234 width = 64;
1235 if (self.size != self.dflt_size):
1236 bit_select = 1
1237 if width:
1238 base = 'xc->%s(this, %d, %d)' % \
1239 (func, self.src_reg_idx, width)
1240 else:
1241 base = 'xc->%s(this, %d)' % \
1242 (func, self.src_reg_idx)
1243 if bit_select:
1244 return '%s = bits(%s, %d, 0);\n' % \
1245 (self.base_name, base, self.size-1)
1246 else:
1247 return '%s = %s;\n' % (self.base_name, base)
1248
1249 def makeWrite(self):
1250 final_val = self.base_name
1251 final_ctype = self.ctype
1252 widthSpecifier = ''
1253 width = 0
1254 if (self.ctype == 'float'):
1255 width = 32
1256 func = 'setFloatReg'
1257 elif (self.ctype == 'double'):
1258 width = 64
1259 func = 'setFloatReg'
1260 elif (self.ctype == 'uint32_t'):
1261 func = 'setFloatRegBits'
1262 width = 32
1263 elif (self.ctype == 'uint64_t'):
1264 func = 'setFloatRegBits'
1265 width = 64
1266 else:
1267 func = 'setFloatRegBits'
1268 final_ctype = 'uint%d_t' % self.dflt_size
1269 if (self.size != self.dflt_size and self.is_signed):
1270 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1271 if width:
1272 widthSpecifier = ', %d' % width
1273 wb = '''
1274 {
1275 %s final_val = %s;
1276 xc->%s(this, %d, final_val%s);\n
1277 if (traceData) { traceData->setData(final_val); }
1278 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1279 widthSpecifier)
1280 return wb
1281
1282class ControlRegOperand(Operand):
1283 def isReg(self):
1284 return 1
1285
1286 def isControlReg(self):
1287 return 1
1288
1289 def makeConstructor(self):
1290 c = ''
1291 if self.is_src:
1292 c += '\n\t_srcRegIdx[%d] = %s;' % \
1293 (self.src_reg_idx, self.reg_spec)
1294 if self.is_dest:
1295 c += '\n\t_destRegIdx[%d] = %s;' % \
1296 (self.dest_reg_idx, self.reg_spec)
1297 return c
1298
1299 def makeRead(self):
1300 bit_select = 0
1301 if (self.ctype == 'float' or self.ctype == 'double'):
1302 error(0, 'Attempt to read control register as FP')
1303 base = 'xc->readMiscReg(%s)' % self.reg_spec
1304 if self.size == self.dflt_size:
1305 return '%s = %s;\n' % (self.base_name, base)
1306 else:
1307 return '%s = bits(%s, %d, 0);\n' % \
1308 (self.base_name, base, self.size-1)
1309
1310 def makeWrite(self):
1311 if (self.ctype == 'float' or self.ctype == 'double'):
1312 error(0, 'Attempt to write control register as FP')
1313 wb = 'xc->setMiscReg(%s, %s);\n' % (self.reg_spec, self.base_name)
1314 wb += 'if (traceData) { traceData->setData(%s); }' % \
1315 self.base_name
1316 return wb
1317
1318class MemOperand(Operand):
1319 def isMem(self):
1320 return 1
1321
1322 def makeConstructor(self):
1323 return ''
1324
1325 def makeDecl(self):
1326 # Note that initializations in the declarations are solely
1327 # to avoid 'uninitialized variable' errors from the compiler.
1328 # Declare memory data variable.
1329 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1330 return c
1331
1332 def makeRead(self):
1333 return ''
1334
1335 def makeWrite(self):
1336 return ''
1337
1338 # Return the memory access size *in bits*, suitable for
1339 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1340 def makeAccSize(self):
1341 return self.size
1342
1343
1344class NPCOperand(Operand):
1345 def makeConstructor(self):
1346 return ''
1347
1348 def makeRead(self):
1349 return '%s = xc->readNextPC();\n' % self.base_name
1350
1351 def makeWrite(self):
1352 return 'xc->setNextPC(%s);\n' % self.base_name
1353
1354class NNPCOperand(Operand):
1355 def makeConstructor(self):
1356 return ''
1357
1358 def makeRead(self):
1359 return '%s = xc->readNextNPC();\n' % self.base_name
1360
1361 def makeWrite(self):
1362 return 'xc->setNextNPC(%s);\n' % self.base_name
1363
1364def buildOperandNameMap(userDict, lineno):
1365 global operandNameMap
1366 operandNameMap = {}
1367 for (op_name, val) in userDict.iteritems():
1368 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1369 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1370 # Canonical flag structure is a triple of lists, where each list
1371 # indicates the set of flags implied by this operand always, when
1372 # used as a source, and when used as a dest, respectively.
1373 # For simplicity this can be initialized using a variety of fairly
1374 # obvious shortcuts; we convert these to canonical form here.
1375 if not flags:
1376 # no flags specified (e.g., 'None')
1377 flags = ( [], [], [] )
1378 elif isinstance(flags, str):
1379 # a single flag: assumed to be unconditional
1380 flags = ( [ flags ], [], [] )
1381 elif isinstance(flags, list):
1382 # a list of flags: also assumed to be unconditional
1383 flags = ( flags, [], [] )
1384 elif isinstance(flags, tuple):
1385 # it's a tuple: it should be a triple,
1386 # but each item could be a single string or a list
1387 (uncond_flags, src_flags, dest_flags) = flags
1388 flags = (makeList(uncond_flags),
1389 makeList(src_flags), makeList(dest_flags))
1390 # Accumulate attributes of new operand class in tmp_dict
1391 tmp_dict = {}
1392 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1393 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1394 tmp_dict[attr] = eval(attr)
1395 tmp_dict['base_name'] = op_name
1396 # New class name will be e.g. "IntReg_Ra"
1397 cls_name = base_cls_name + '_' + op_name
1398 # Evaluate string arg to get class object. Note that the
1399 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1400 # have to append "Operand".
1401 try:
1402 base_cls = eval(base_cls_name + 'Operand')
1403 except NameError:
1404 error(lineno,
1405 'error: unknown operand base class "%s"' % base_cls_name)
1406 # The following statement creates a new class called
1407 # <cls_name> as a subclass of <base_cls> with the attributes
1408 # in tmp_dict, just as if we evaluated a class declaration.
1409 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
1410
1411 # Define operand variables.
1412 operands = userDict.keys()
1413
1414 operandsREString = (r'''
1415 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1416 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1417 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1418 '''
1419 % string.join(operands, '|'))
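    # For illustration (hypothetical operand names 'Ra' and 'Mem'): this
    # pattern matches 'Ra', 'Ra.sq', and 'Mem.uw' inside a code block,
    # capturing the full name, the base name, and the optional extension.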
1420
1421 global operandsRE
1422 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
1423
1424 # Same as operandsREString, but extension is mandatory, and only two
1425 # groups are returned (base and ext, not full name as above).
    # Used for stripping the '.<ext>' suffix to make legal C++ identifiers
    # (see substMungedOpNames()).
1427 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1428 % string.join(operands, '|'))
1429
1430 global operandsWithExtRE
1431 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1432
1433
1434class OperandList:
1435
1436 # Find all the operands in the given code block. Returns an operand
1437 # descriptor list (instance of class OperandList).
1438 def __init__(self, code):
1439 self.items = []
1440 self.bases = {}
1441 # delete comments so we don't match on reg specifiers inside
1442 code = commentRE.sub('', code)
1443 # search for operands
1444 next_pos = 0
1445 while 1:
1446 match = operandsRE.search(code, next_pos)
1447 if not match:
1448 # no more matches: we're done
1449 break
1450 op = match.groups()
1451 # regexp groups are operand full name, base, and extension
1452 (op_full, op_base, op_ext) = op
1453 # if the token following the operand is an assignment, this is
1454 # a destination (LHS), else it's a source (RHS)
1455 is_dest = (assignRE.match(code, match.end()) != None)
1456 is_src = not is_dest
1457 # see if we've already seen this one
1458 op_desc = self.find_base(op_base)
1459 if op_desc:
1460 if op_desc.ext != op_ext:
1461 error(0, 'Inconsistent extensions for operand %s' % \
1462 op_base)
1463 op_desc.is_src = op_desc.is_src or is_src
1464 op_desc.is_dest = op_desc.is_dest or is_dest
1465 else:
1466 # new operand: create new descriptor
1467 op_desc = operandNameMap[op_base](op_full, op_ext,
1468 is_src, is_dest)
1469 self.append(op_desc)
1470 # start next search after end of current match
1471 next_pos = match.end()
1472 self.sort()
1473 # enumerate source & dest register operands... used in building
1474 # constructor later
1475 self.numSrcRegs = 0
1476 self.numDestRegs = 0
1477 self.numFPDestRegs = 0
1478 self.numIntDestRegs = 0
1479 self.memOperand = None
1480 for op_desc in self.items:
1481 if op_desc.isReg():
1482 if op_desc.is_src:
1483 op_desc.src_reg_idx = self.numSrcRegs
1484 self.numSrcRegs += 1
1485 if op_desc.is_dest:
1486 op_desc.dest_reg_idx = self.numDestRegs
1487 self.numDestRegs += 1
1488 if op_desc.isFloatReg():
1489 self.numFPDestRegs += 1
1490 elif op_desc.isIntReg():
1491 self.numIntDestRegs += 1
1492 elif op_desc.isMem():
1493 if self.memOperand:
1494 error(0, "Code block has more than one memory operand.")
1495 self.memOperand = op_desc
1496 # now make a final pass to finalize op_desc fields that may depend
1497 # on the register enumeration
1498 for op_desc in self.items:
1499 op_desc.finalize()
1500
1501 def __len__(self):
1502 return len(self.items)
1503
1504 def __getitem__(self, index):
1505 return self.items[index]
1506
1507 def append(self, op_desc):
1508 self.items.append(op_desc)
1509 self.bases[op_desc.base_name] = op_desc
1510
1511 def find_base(self, base_name):
1512 # like self.bases[base_name], but returns None if not found
1513 # (rather than raising exception)
1514 return self.bases.get(base_name)
1515
1516 # internal helper function for concat[Some]Attr{Strings|Lists}
1517 def __internalConcatAttrs(self, attr_name, filter, result):
1518 for op_desc in self.items:
1519 if filter(op_desc):
1520 result += getattr(op_desc, attr_name)
1521 return result
1522
1523 # return a single string that is the concatenation of the (string)
1524 # values of the specified attribute for all operands
1525 def concatAttrStrings(self, attr_name):
1526 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1527
1528 # like concatAttrStrings, but only include the values for the operands
1529 # for which the provided filter function returns true
1530 def concatSomeAttrStrings(self, filter, attr_name):
1531 return self.__internalConcatAttrs(attr_name, filter, '')
1532
1533 # return a single list that is the concatenation of the (list)
1534 # values of the specified attribute for all operands
1535 def concatAttrLists(self, attr_name):
1536 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1537
1538 # like concatAttrLists, but only include the values for the operands
1539 # for which the provided filter function returns true
1540 def concatSomeAttrLists(self, filter, attr_name):
1541 return self.__internalConcatAttrs(attr_name, filter, [])
1542
1543 def sort(self):
1544 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1545
1546# Regular expression object to match C++ comments
1547# (used in findOperands())
1548commentRE = re.compile(r'//.*\n')
1549
1550# Regular expression object to match assignment statements
1551# (used in findOperands())
1552assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
1553
1554# Munge operand names in code string to make legal C++ variable names.
1555# This means getting rid of the type extension if any.
1556# (Will match base_name attribute of Operand object.)
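# For illustration (hypothetical operand names, assuming both are defined
# operands): 'Mem.uw = Ra.sq + 4;' becomes 'Mem = Ra + 4;'.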
1557def substMungedOpNames(code):
1558 return operandsWithExtRE.sub(r'\1', code)
1559
1560def joinLists(t):
1561 return map(string.join, t)
1562
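# Build the flag-initialization portion of a StaticInst constructor from a
# list of flag names. For example (hypothetical flags), ['IsLoad', 'IsMemRef']
# yields "\n\tflags[IsLoad] = true;\n\tflags[IsMemRef] = true;", with
# duplicates removed.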
1563def makeFlagConstructor(flag_list):
1564 if len(flag_list) == 0:
1565 return ''
1566 # filter out repeated flags
1567 flag_list.sort()
1568 i = 1
1569 while i < len(flag_list):
1570 if flag_list[i] == flag_list[i-1]:
1571 del flag_list[i]
1572 else:
1573 i += 1
1574 pre = '\n\tflags['
1575 post = '] = true;'
1576 code = pre + string.join(flag_list, post + pre) + post
1577 return code
1578
1579class CodeBlock:
1580 def __init__(self, code):
1581 self.orig_code = code
1582 self.operands = OperandList(code)
1583 self.code = substMungedOpNames(substBitOps(code))
1584 self.constructor = self.operands.concatAttrStrings('constructor')
1585 self.constructor += \
1586 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1587 self.constructor += \
1588 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1589 self.constructor += \
1590 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1591 self.constructor += \
1592 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1593
1594 self.op_decl = self.operands.concatAttrStrings('op_decl')
1595
1596 is_src = lambda op: op.is_src
1597 is_dest = lambda op: op.is_dest
1598
1599 self.op_src_decl = \
1600 self.operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1601 self.op_dest_decl = \
1602 self.operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1603
1604 self.op_rd = self.operands.concatAttrStrings('op_rd')
1605 self.op_wb = self.operands.concatAttrStrings('op_wb')
1606
1607 self.flags = self.operands.concatAttrLists('flags')
1608
1609 if self.operands.memOperand:
1610 self.mem_acc_size = self.operands.memOperand.mem_acc_size
1611 self.mem_acc_type = self.operands.memOperand.mem_acc_type
1612
1613 # Make a basic guess on the operand class (function unit type).
1614 # These are good enough for most cases, and will be overridden
1615 # later otherwise.
1616 if 'IsStore' in self.flags:
1617 self.op_class = 'MemWriteOp'
1618 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1619 self.op_class = 'MemReadOp'
1620 elif 'IsFloating' in self.flags:
1621 self.op_class = 'FloatAddOp'
1622 else:
1623 self.op_class = 'IntAluOp'
1624
1625# Assume all instruction flags are of the form 'IsFoo'
1626instFlagRE = re.compile(r'Is.*')
1627
1628# OpClass constants end in 'Op' except No_OpClass
1629opClassRE = re.compile(r'.*Op|No_OpClass')
1630
1631class InstObjParams:
1632 def __init__(self, mnem, class_name, base_class = '',
1633 code = None, opt_args = [], *extras):
1634 self.mnemonic = mnem
1635 self.class_name = class_name
1636 self.base_class = base_class
1637 if code:
1638 #If the user already made a CodeBlock, pick the parts from it
1639 if isinstance(code, CodeBlock):
1640 origCode = code.orig_code
1641 codeBlock = code
1642 else:
1643 origCode = code
1644 codeBlock = CodeBlock(code)
1645 compositeCode = '\n'.join([origCode] +
1646 [pair[1] for pair in extras])
1647 compositeBlock = CodeBlock(compositeCode)
1648 for code_attr in compositeBlock.__dict__.keys():
1649 setattr(self, code_attr, getattr(compositeBlock, code_attr))
1650 for (key, snippet) in extras:
1651 setattr(self, key, CodeBlock(snippet).code)
1652 self.code = codeBlock.code
1653 self.orig_code = origCode
1654 else:
1655 self.constructor = ''
1656 self.flags = []
1657 # Optional arguments are assumed to be either StaticInst flags
1658 # or an OpClass value. To avoid having to import a complete
1659 # list of these values to match against, we do it ad-hoc
1660 # with regexps.
1661 for oa in opt_args:
1662 if instFlagRE.match(oa):
1663 self.flags.append(oa)
1664 elif opClassRE.match(oa):
1665 self.op_class = oa
1666 else:
1667 error(0, 'InstObjParams: optional arg "%s" not recognized '
1668 'as StaticInst::Flag or OpClass.' % oa)
1669
        # add flag initialization to the constructor here to include
1671 # any flags added via opt_args
1672 self.constructor += makeFlagConstructor(self.flags)
1673
1674 # if 'IsFloating' is set, add call to the FP enable check
1675 # function (which should be provided by isa_desc via a declare)
1676 if 'IsFloating' in self.flags:
1677 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1678 else:
1679 self.fp_enable_check = ''
1680
1681#######################
1682#
1683# Output file template
1684#
1685
1686file_template = '''
1687/*
1688 * DO NOT EDIT THIS FILE!!!
1689 *
1690 * It was automatically generated from the ISA description in %(filename)s
1691 */
1692
1693%(includes)s
1694
1695%(global_output)s
1696
1697namespace %(namespace)s {
1698
1699%(namespace_output)s
1700
1701} // namespace %(namespace)s
1702
1703%(decode_function)s
1704'''
1705
1706
1707# Update the output file only if the new contents are different from
1708# the current contents. Minimizes the files that need to be rebuilt
1709# after minor changes.
1710def update_if_needed(file, contents):
1711 update = False
1712 if os.access(file, os.R_OK):
1713 f = open(file, 'r')
1714 old_contents = f.read()
1715 f.close()
1716 if contents != old_contents:
1717 print 'Updating', file
1718 os.remove(file) # in case it's write-protected
1719 update = True
1720 else:
1721 print 'File', file, 'is unchanged'
1722 else:
1723 print 'Generating', file
1724 update = True
1725 if update:
1726 f = open(file, 'w')
1727 f.write(contents)
1728 f.close()
1729
1730# This regular expression matches '##include' directives
1731includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1732 re.MULTILINE)
1733
1734# Function to replace a matched '##include' directive with the
1735# contents of the specified file (with nested ##includes replaced
1736# recursively). 'matchobj' is an re match object (from a match of
1737# includeRE) and 'dirname' is the directory relative to which the file
1738# path should be resolved.
1739def replace_include(matchobj, dirname):
1740 fname = matchobj.group('filename')
1741 full_fname = os.path.normpath(os.path.join(dirname, fname))
1742 contents = '##newfile "%s"\n%s\n##endfile\n' % \
1743 (full_fname, read_and_flatten(full_fname))
1744 return contents
1745
1746# Read a file and recursively flatten nested '##include' files.
1747def read_and_flatten(filename):
1748 current_dir = os.path.dirname(filename)
1749 try:
1750 contents = open(filename).read()
1751 except IOError:
1752 error(0, 'Error including file "%s"' % filename)
1753 fileNameStack.push((filename, 0))
1754 # Find any includes and include them
1755 contents = includeRE.sub(lambda m: replace_include(m, current_dir),
1756 contents)
1757 fileNameStack.pop()
1758 return contents
1759
1760#
1761# Read in and parse the ISA description.
1762#
1763def parse_isa_desc(isa_desc_file, output_dir):
1764 # Read file and (recursively) all included files into a string.
1765 # PLY requires that the input be in a single string so we have to
1766 # do this up front.
1767 isa_desc = read_and_flatten(isa_desc_file)
1768
1769 # Initialize filename stack with outer file.
1770 fileNameStack.push((isa_desc_file, 0))
1771
1772 # Parse it.
1773 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1774
1775 # grab the last three path components of isa_desc_file to put in
1776 # the output
1777 filename = '/'.join(isa_desc_file.split('/')[-3:])
1778
1779 # generate decoder.hh
1780 includes = '#include "base/bitfield.hh" // for bitfield support'
1781 global_output = global_code.header_output
1782 namespace_output = namespace_code.header_output
1783 decode_function = ''
1784 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1785
1786 # generate decoder.cc
1787 includes = '#include "decoder.hh"'
1788 global_output = global_code.decoder_output
1789 namespace_output = namespace_code.decoder_output
1790 # namespace_output += namespace_code.decode_block
1791 decode_function = namespace_code.decode_block
1792 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1793
1794 # generate per-cpu exec files
1795 for cpu in cpu_models:
1796 includes = '#include "decoder.hh"\n'
1797 includes += cpu.includes
1798 global_output = global_code.exec_output[cpu.name]
1799 namespace_output = namespace_code.exec_output[cpu.name]
1800 decode_function = ''
1801 update_if_needed(output_dir + '/' + cpu.filename,
1802 file_template % vars())
1803
1804# global list of CpuModel objects (see cpu_models.py)
1805cpu_models = []
1806
1807# Called as script: get args from command line.
1808# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
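# For illustration (script, paths, and model names are all hypothetical):
#     python isa_parser.py cpu_models.py alpha/isa/main.isa build/alpha \
#         SimpleCPU FullCPU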
1809if __name__ == '__main__':
1810 execfile(sys.argv[1]) # read in CpuModel definitions
1811 cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
1812 parse_isa_desc(sys.argv[2], sys.argv[3])