1# Copyright (c) 2003-2005 The Regents of The University of Michigan
2# All rights reserved.
3#
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met: redistributions of source code must retain the above copyright
7# notice, this list of conditions and the following disclaimer;
8# redistributions in binary form must reproduce the above copyright
9# notice, this list of conditions and the following disclaimer in the
10# documentation and/or other materials provided with the distribution;
11# neither the name of the copyright holders nor the names of its
12# contributors may be used to endorse or promote products derived from
13# this software without specific prior written permission.
14#
15# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26#
27# Authors: Steve Reinhardt
28# Gabe Black
29# Korey Sewell
30
31import os
32import sys
33import re
34import string
35import traceback
36# get type names
37from types import *
38
39# Prepend the directory where the PLY lex & yacc modules are found
40# to the search path. Assumes we're compiling in a subdirectory
41# of 'build' in the current tree.
42sys.path[0:0] = [os.environ['M5_PLY']]
43
44from ply import lex
45from ply import yacc
46
47#####################################################################
48#
49# Lexer
50#
51# The PLY lexer module takes two things as input:
52# - A list of token names (the string list 'tokens')
53# - A regular expression describing a match for each token. The
54# regexp for token FOO can be provided in two ways:
55# - as a string variable named t_FOO
56# - as the doc string for a function named t_FOO. In this case,
57# the function is also executed, allowing an action to be
58# associated with each token match.
59#
60#####################################################################
61
62# Reserved words. These are listed separately as they are matched
63# using the same regexp as generic IDs, but distinguished in the
64# t_ID() function. The PLY documentation suggests this approach.
65reserved = (
66 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
67 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
68 'OUTPUT', 'SIGNED', 'TEMPLATE'
69 )
70
71# List of tokens. The lex module requires this.
72tokens = reserved + (
73 # identifier
74 'ID',
75
76 # integer literal
77 'INTLIT',
78
79 # string literal
80 'STRLIT',
81
82 # code literal
83 'CODELIT',
84
85 # ( ) [ ] { } < > , ; . : :: *
86 'LPAREN', 'RPAREN',
87 'LBRACKET', 'RBRACKET',
88 'LBRACE', 'RBRACE',
89 'LESS', 'GREATER', 'EQUALS',
90 'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
91 'ASTERISK',
92
93 # C preprocessor directives
94 'CPPDIRECTIVE'
95
96# The following are matched but never returned. Commented out to
97# suppress a PLY warning.
98 # newfile directive
99# 'NEWFILE',
100
101 # endfile directive
102# 'ENDFILE'
103)
104
105# Regular expressions for token matching
106t_LPAREN = r'\('
107t_RPAREN = r'\)'
108t_LBRACKET = r'\['
109t_RBRACKET = r'\]'
110t_LBRACE = r'\{'
111t_RBRACE = r'\}'
112t_LESS = r'\<'
113t_GREATER = r'\>'
114t_EQUALS = r'='
115t_COMMA = r','
116t_SEMI = r';'
117t_DOT = r'\.'
118t_COLON = r':'
119t_DBLCOLON = r'::'
120t_ASTERISK = r'\*'
121
122# Identifiers and reserved words
123reserved_map = { }
124for r in reserved:
125 reserved_map[r.lower()] = r
126
127def t_ID(t):
128 r'[A-Za-z_]\w*'
129 t.type = reserved_map.get(t.value,'ID')
130 return t
131
132# Integer literal
133def t_INTLIT(t):
134 r'(0x[\da-fA-F]+)|\d+'
135 try:
136 t.value = int(t.value,0)
137 except ValueError:
138 error(t.lineno, 'Integer value "%s" too large' % t.value)
139 t.value = 0
140 return t
141
142# String literal. Note that these use only single quotes, and
143# can span multiple lines.
144def t_STRLIT(t):
145 r"(?m)'([^'])+'"
146 # strip off quotes
147 t.value = t.value[1:-1]
148 t.lineno += t.value.count('\n')
149 return t
150
151
152# "Code literal"... like a string literal, but delimiters are
153# '{{' and '}}' so they get formatted nicely under emacs c-mode
154def t_CODELIT(t):
155 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
156 # strip off {{ & }}
157 t.value = t.value[2:-2]
158 t.lineno += t.value.count('\n')
159 return t
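
# Purely an illustrative sketch of the two literal forms as they might
# appear in an ISA description file (the contents are made up):
#
#   'a string literal uses single quotes
#    and may span multiple lines'
#
#   {{
#       // a code literal: raw text handed to the parser with the
#       // surrounding {{ and }} stripped off
#       Rd = Ra + Rb;
#   }}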
160
161def t_CPPDIRECTIVE(t):
162 r'^\#[^\#].*\n'
163 t.lineno += t.value.count('\n')
164 return t
165
166def t_NEWFILE(t):
167 r'^\#\#newfile\s+"[\w/.-]*"'
168 fileNameStack.push((t.value[11:-1], t.lineno))
169 t.lineno = 0
170
171def t_ENDFILE(t):
172 r'^\#\#endfile'
173 (old_filename, t.lineno) = fileNameStack.pop()
174
175#
176# The functions t_NEWLINE, t_ignore, and t_error are
177# special for the lex module.
178#
179
180# Newlines
181def t_NEWLINE(t):
182 r'\n+'
183 t.lineno += t.value.count('\n')
184
185# Comments
186def t_comment(t):
187 r'//.*'
188
189# Completely ignored characters
190t_ignore = ' \t\x0c'
191
192# Error handler
193def t_error(t):
194 error(t.lineno, "illegal character '%s'" % t.value[0])
195 t.skip(1)
196
197# Build the lexer
198lexer = lex.lex()
199
200#####################################################################
201#
202# Parser
203#
204# Every function whose name starts with 'p_' defines a grammar rule.
205# The rule is encoded in the function's doc string, while the
206# function body provides the action taken when the rule is matched.
207# The argument to each function is a list of the values of the
208# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
209# on the RHS. For tokens, the value is copied from the t.value
210# attribute provided by the lexer. For non-terminals, the value
211# is assigned by the producing rule; i.e., the job of the grammar
212# rule function is to set the value for the non-terminal on the LHS
213# (by assigning to t[0]).
214#####################################################################
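
# As a minimal sketch of the convention described above (this is not
# one of the grammar's real rules; the names are invented):
#
#   def p_example_pair(t):
#       'example_pair : ID EQUALS INTLIT'
#       # t[1] is the ID's string, t[3] the INTLIT's integer value
#       t[0] = (t[1], t[3])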
215
216# The LHS of the first grammar rule is used as the start symbol
217# (in this case, 'specification'). Note that this rule enforces
218# that there will be exactly one namespace declaration, with 0 or more
219# global defs/decls before and after it. The defs & decls before
220# the namespace decl will be outside the namespace; those after
221# will be inside. The decoder function is always inside the namespace.
222def p_specification(t):
223 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
224 global_code = t[1]
225 isa_name = t[2]
226 namespace = isa_name + "Inst"
227 # wrap the decode block as a function definition
228 t[4].wrap_decode_block('''
229StaticInstPtr
230%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
231{
232 using namespace %(namespace)s;
233''' % vars(), '}')
234 # both the latter output blocks and the decode block are in the namespace
235 namespace_code = t[3] + t[4]
236 # pass it all back to the caller of yacc.parse()
237 t[0] = (isa_name, namespace, global_code, namespace_code)
238
239# ISA name declaration looks like "namespace <foo>;"
240def p_name_decl(t):
241 'name_decl : NAMESPACE ID SEMI'
242 t[0] = t[2]
243
244# 'opt_defs_and_outputs' is a possibly empty sequence of
245# def and/or output statements.
246def p_opt_defs_and_outputs_0(t):
247 'opt_defs_and_outputs : empty'
248 t[0] = GenCode()
249
250def p_opt_defs_and_outputs_1(t):
251 'opt_defs_and_outputs : defs_and_outputs'
252 t[0] = t[1]
253
254def p_defs_and_outputs_0(t):
255 'defs_and_outputs : def_or_output'
256 t[0] = t[1]
257
258def p_defs_and_outputs_1(t):
259 'defs_and_outputs : defs_and_outputs def_or_output'
260 t[0] = t[1] + t[2]
261
262# The list of possible definition/output statements.
263def p_def_or_output(t):
264 '''def_or_output : def_format
265 | def_bitfield
266 | def_bitfield_struct
267 | def_template
268 | def_operand_types
269 | def_operands
270 | output_header
271 | output_decoder
272 | output_exec
273 | global_let'''
274 t[0] = t[1]
275
276# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
277# directly to the appropriate output section.
278
279
280# Protect any non-dict-substitution '%'s in a format string
281# (i.e. those not followed by '(')
282def protect_non_subst_percents(s):
283 return re.sub(r'%(?!\()', '%%', s)
284
285# Massage output block by substituting in template definitions and bit
286# operators. We handle '%'s embedded in the string that don't
287# indicate template substitutions (or CPU-specific symbols, which get
288# handled in GenCode) by doubling them first so that the format
289# operation will reduce them back to single '%'s.
290def process_output(s):
291 s = protect_non_subst_percents(s)
292 # protects cpu-specific symbols too
293 s = protect_cpu_symbols(s)
294 return substBitOps(s % templateMap)
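
# Behavior sketch for the helper above (the example string is
# arbitrary): a '%' that is not part of a '%(name)s' reference gets
# doubled, so the later '%' formatting step emits it unchanged.
#
#   protect_non_subst_percents('x %= 2; %(BAR)s')
#       --> 'x %%= 2; %(BAR)s'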
295
296def p_output_header(t):
297 'output_header : OUTPUT HEADER CODELIT SEMI'
298 t[0] = GenCode(header_output = process_output(t[3]))
299
300def p_output_decoder(t):
301 'output_decoder : OUTPUT DECODER CODELIT SEMI'
302 t[0] = GenCode(decoder_output = process_output(t[3]))
303
304def p_output_exec(t):
305 'output_exec : OUTPUT EXEC CODELIT SEMI'
306 t[0] = GenCode(exec_output = process_output(t[3]))
307
308# global let blocks 'let {{...}}' (Python code blocks) are executed
309# directly when seen. Note that these execute in a special variable
310# context 'exportContext' to prevent the code from polluting this
311# script's namespace.
312def p_global_let(t):
313 'global_let : LET CODELIT SEMI'
314 updateExportContext()
315 exportContext["header_output"] = ''
316 exportContext["decoder_output"] = ''
317 exportContext["exec_output"] = ''
318 exportContext["decode_block"] = ''
319 try:
320 exec fixPythonIndentation(t[2]) in exportContext
321 except Exception, exc:
322 error(t.lineno(1),
323 'error: %s in global let block "%s".' % (exc, t[2]))
324 t[0] = GenCode(header_output = exportContext["header_output"],
325 decoder_output = exportContext["decoder_output"],
326 exec_output = exportContext["exec_output"],
327 decode_block = exportContext["decode_block"])
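
# A hedged example of what a global let block might look like in an
# ISA description (the helper defined here is invented for the sketch):
#
#   let {{
#       def makeComment(s):
#           return '// ' + s + '\n'
#       header_output = makeComment('generated by a let block')
#   }};
#
# The Python code runs immediately in exportContext, and whatever it
# leaves in header_output / decoder_output / exec_output / decode_block
# is collected into the GenCode object above.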
328
329# Define the mapping from operand type extensions to C++ types and bit
330# widths (stored in operandTypeMap).
331def p_def_operand_types(t):
332 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
333 try:
334 userDict = eval('{' + t[3] + '}')
335 except Exception, exc:
336 error(t.lineno(1),
337 'error: %s in def operand_types block "%s".' % (exc, t[3]))
338 buildOperandTypeMap(userDict, t.lineno(1))
339 t[0] = GenCode() # contributes nothing to the output C++ file
340
341# Define the mapping from operand names to operand classes and other
342# traits. Stored in operandNameMap.
343def p_def_operands(t):
344 'def_operands : DEF OPERANDS CODELIT SEMI'
345 if not globals().has_key('operandTypeMap'):
346 error(t.lineno(1),
347 'error: operand types must be defined before operands')
348 try:
349 userDict = eval('{' + t[3] + '}')
350 except Exception, exc:
351 error(t.lineno(1),
352 'error: %s in def operands block "%s".' % (exc, t[3]))
353 buildOperandNameMap(userDict, t.lineno(1))
354 t[0] = GenCode() # contributes nothing to the output C++ file
355
356# A bitfield definition looks like:
357# 'def [signed] bitfield <ID> [<first>:<last>]'
358# This generates a preprocessor macro in the output file.
359def p_def_bitfield_0(t):
360 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
361 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
362 if (t[2] == 'signed'):
363 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
364 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
365 t[0] = GenCode(header_output = hash_define)
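
# Sketch of the expansion (field name and bit positions are made up):
#
#   def bitfield OPCODE <31:26>;
#
# emits roughly
#
#   #undef OPCODE
#   #define OPCODE  bits(machInst, 31, 26)
#
# and 'def signed bitfield IMM <15:0>;' additionally wraps the
# expression as sext<16>(bits(machInst, 15, 0)).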
366
367# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
368def p_def_bitfield_1(t):
369 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
370 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
371 if (t[2] == 'signed'):
372 expr = 'sext<%d>(%s)' % (1, expr)
373 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
374 t[0] = GenCode(header_output = hash_define)
375
376# alternate form for structure member: 'def bitfield <ID> <ID>'
377def p_def_bitfield_struct(t):
378 'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
379 if (t[2] != ''):
380 error(t.lineno(1), 'error: structure bitfields are always unsigned.')
381 expr = 'machInst.%s' % t[5]
382 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
383 t[0] = GenCode(header_output = hash_define)
384
385def p_id_with_dot_0(t):
386 'id_with_dot : ID'
387 t[0] = t[1]
388
389def p_id_with_dot_1(t):
390 'id_with_dot : ID DOT id_with_dot'
391 t[0] = t[1] + t[2] + t[3]
392
393def p_opt_signed_0(t):
394 'opt_signed : SIGNED'
395 t[0] = t[1]
396
397def p_opt_signed_1(t):
398 'opt_signed : empty'
399 t[0] = ''
400
401# Global map variable to hold templates
402templateMap = {}
403
404def p_def_template(t):
405 'def_template : DEF TEMPLATE ID CODELIT SEMI'
406 templateMap[t[3]] = Template(t[4])
407 t[0] = GenCode()
408
409# An instruction format definition looks like
410# "def format <fmt>(<params>) {{...}};"
411def p_def_format(t):
412 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
413 (id, params, code) = (t[3], t[5], t[7])
414 defFormat(id, params, code, t.lineno(1))
415 t[0] = GenCode()
416
417# The formal parameter list for an instruction format is a possibly
418# empty list of comma-separated parameters. Positional (standard,
419# non-keyword) parameters must come first, followed by keyword
420# parameters, followed by a '*foo' parameter that gets excess
421# positional arguments (as in Python). Each of these three parameter
422# categories is optional.
423#
424# Note that we do not support the '**foo' parameter for collecting
425# otherwise undefined keyword args. Otherwise the parameter list is
426# (I believe) identical to what is supported in Python.
427#
428# The param list is generated as a flat list of parameter-spec strings
429# (positional names, 'name = default' keyword specs, and an optional
430# '*name'), which Format.__init__() joins into a def signature.
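# For example (names invented for the sketch), a format declared as
#
#   def format LoadImm(dest, imm = 0, *flags) {{ ... }};
#
# produces the parameter-spec list ['dest', 'imm = 0', '*flags'], which
# Format.__init__() below joins into the signature of the generated
# defInst() wrapper.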
431def p_param_list_0(t):
432 'param_list : positional_param_list COMMA nonpositional_param_list'
433 t[0] = t[1] + t[3]
434
435def p_param_list_1(t):
436 '''param_list : positional_param_list
437 | nonpositional_param_list'''
438 t[0] = t[1]
439
440def p_positional_param_list_0(t):
441 'positional_param_list : empty'
442 t[0] = []
443
444def p_positional_param_list_1(t):
445 'positional_param_list : ID'
446 t[0] = [t[1]]
447
448def p_positional_param_list_2(t):
449 'positional_param_list : positional_param_list COMMA ID'
450 t[0] = t[1] + [t[3]]
451
452def p_nonpositional_param_list_0(t):
453 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
454 t[0] = t[1] + t[3]
455
456def p_nonpositional_param_list_1(t):
457 '''nonpositional_param_list : keyword_param_list
458 | excess_args_param'''
459 t[0] = t[1]
460
461def p_keyword_param_list_0(t):
462 'keyword_param_list : keyword_param'
463 t[0] = [t[1]]
464
465def p_keyword_param_list_1(t):
466 'keyword_param_list : keyword_param_list COMMA keyword_param'
467 t[0] = t[1] + [t[3]]
468
469def p_keyword_param(t):
470 'keyword_param : ID EQUALS expr'
471 t[0] = t[1] + ' = ' + t[3].__repr__()
472
473def p_excess_args_param(t):
474 'excess_args_param : ASTERISK ID'
475 # Just concatenate them: '*ID'. Wrap in list to be consistent
476 # with positional_param_list and keyword_param_list.
477 t[0] = [t[1] + t[2]]
478
479# End of format definition-related rules.
480##############
481
482#
483# A decode block looks like:
484# decode <field1> [, <field2>]* [default <inst>] { ... }
485#
486def p_decode_block(t):
487 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
488 default_defaults = defaultStack.pop()
489 codeObj = t[5]
490 # use the "default defaults" only if there was no explicit
491 # default statement in decode_stmt_list
492 if not codeObj.has_decode_default:
493 codeObj += default_defaults
494 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
495 t[0] = codeObj
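
# Rough sketch of the mapping (field, format, and mnemonic names are
# invented):
#
#   decode OPCODE default Unknown::unknown() {
#       0x0: IntOp::add({{ Rd = Ra + Rb; }});
#   }
#
# becomes, in the generated decode function, approximately
#
#   switch (OPCODE) {
#     case 0x0:
#       ... code generated for IntOp::add ...
#       break;
#     default:
#       ... code generated for Unknown::unknown ...
#       break;
#   }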
496
497# The opt_default statement serves only to push the "default defaults"
498# onto defaultStack. This value will be used by nested decode blocks,
499# and used and popped off when the current decode_block is processed
500# (in p_decode_block() above).
501def p_opt_default_0(t):
502 'opt_default : empty'
503 # no default specified: reuse the one currently at the top of the stack
504 defaultStack.push(defaultStack.top())
505 # no meaningful value returned
506 t[0] = None
507
508def p_opt_default_1(t):
509 'opt_default : DEFAULT inst'
510 # push the new default
511 codeObj = t[2]
512 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
513 defaultStack.push(codeObj)
514 # no meaningful value returned
515 t[0] = None
516
517def p_decode_stmt_list_0(t):
518 'decode_stmt_list : decode_stmt'
519 t[0] = t[1]
520
521def p_decode_stmt_list_1(t):
522 'decode_stmt_list : decode_stmt decode_stmt_list'
523 if (t[1].has_decode_default and t[2].has_decode_default):
524 error(t.lineno(1), 'Two default cases in decode block')
525 t[0] = t[1] + t[2]
526
527#
528# Decode statement rules
529#
530# There are four types of statements allowed in a decode block:
531# 1. Format blocks 'format <foo> { ... }'
532# 2. Nested decode blocks
533# 3. Instruction definitions.
534# 4. C preprocessor directives.
535
536
537# Preprocessor directives found in a decode statement list are passed
538# through to the output, replicated to all of the output code
539# streams. This works well for ifdefs, so we can ifdef out both the
540# declarations and the decode cases generated by an instruction
541# definition. Handling them as part of the grammar makes it easy to
542# keep them in the right place with respect to the code generated by
543# the other statements.
544def p_decode_stmt_cpp(t):
545 'decode_stmt : CPPDIRECTIVE'
546 t[0] = GenCode(t[1], t[1], t[1], t[1])
547
548# A format block 'format <foo> { ... }' sets the default instruction
549# format used to handle instruction definitions inside the block.
550# This format can be overridden by using an explicit format on the
551# instruction definition or with a nested format block.
552def p_decode_stmt_format(t):
553 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
554 # The format will be pushed on the stack when 'push_format_id' is
555 # processed (see below). Once the parser has recognized the full
556# production (through the right brace), we're done with the format,
557 # so now we can pop it.
558 formatStack.pop()
559 t[0] = t[4]
560
561# This rule exists so we can set the current format (& push the stack)
562# when we recognize the format name part of the format block.
563def p_push_format_id(t):
564 'push_format_id : ID'
565 try:
566 formatStack.push(formatMap[t[1]])
567 t[0] = ('', '// format %s' % t[1])
568 except KeyError:
569 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
570
571# Nested decode block: if the value of the current field matches the
572# specified constant, do a nested decode on some other field.
573def p_decode_stmt_decode(t):
574 'decode_stmt : case_label COLON decode_block'
575 label = t[1]
576 codeObj = t[3]
577 # just wrap the decoding code from the block as a case in the
578 # outer switch statement.
579 codeObj.wrap_decode_block('\n%s:\n' % label)
580 codeObj.has_decode_default = (label == 'default')
581 t[0] = codeObj
582
583# Instruction definition (finally!).
584def p_decode_stmt_inst(t):
585 'decode_stmt : case_label COLON inst SEMI'
586 label = t[1]
587 codeObj = t[3]
588 codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
589 codeObj.has_decode_default = (label == 'default')
590 t[0] = codeObj
591
592# The case label is either a list of one or more constants or 'default'
593def p_case_label_0(t):
594 'case_label : intlit_list'
595 t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))
596
597def p_case_label_1(t):
598 'case_label : DEFAULT'
599 t[0] = 'default'
600
601#
602# The constant list for a decode case label must be non-empty: it
603# consists of one or more comma-separated integer literals.
604#
605def p_intlit_list_0(t):
606 'intlit_list : INTLIT'
607 t[0] = [t[1]]
608
609def p_intlit_list_1(t):
610 'intlit_list : intlit_list COMMA INTLIT'
611 t[0] = t[1]
612 t[0].append(t[3])
613
614# Define an instruction using the current instruction format (specified
615# by an enclosing format block).
616# "<mnemonic>(<args>)"
617def p_inst_0(t):
618 'inst : ID LPAREN arg_list RPAREN'
619 # Pass the ID and arg list to the current format class to deal with.
620 currentFormat = formatStack.top()
621 codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
622 args = ','.join(map(str, t[3]))
623 args = re.sub('(?m)^', '//', args)
624 args = re.sub('^//', '', args)
625 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
626 codeObj.prepend_all(comment)
627 t[0] = codeObj
628
629# Define an instruction using an explicitly specified format:
630# "<fmt>::<mnemonic>(<args>)"
631def p_inst_1(t):
632 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
633 try:
634 format = formatMap[t[1]]
635 except KeyError:
636 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
637 codeObj = format.defineInst(t[3], t[5], t.lineno(1))
638 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
639 codeObj.prepend_all(comment)
640 t[0] = codeObj
641
642# The arg list generates a tuple, where the first element is a list of
643# the positional args and the second element is a dict containing the
644# keyword args.
645def p_arg_list_0(t):
646 'arg_list : positional_arg_list COMMA keyword_arg_list'
647 t[0] = ( t[1], t[3] )
648
649def p_arg_list_1(t):
650 'arg_list : positional_arg_list'
651 t[0] = ( t[1], {} )
652
653def p_arg_list_2(t):
654 'arg_list : keyword_arg_list'
655 t[0] = ( [], t[1] )
656
657def p_positional_arg_list_0(t):
658 'positional_arg_list : empty'
659 t[0] = []
660
661def p_positional_arg_list_1(t):
662 'positional_arg_list : expr'
663 t[0] = [t[1]]
664
665def p_positional_arg_list_2(t):
666 'positional_arg_list : positional_arg_list COMMA expr'
667 t[0] = t[1] + [t[3]]
668
669def p_keyword_arg_list_0(t):
670 'keyword_arg_list : keyword_arg'
671 t[0] = t[1]
672
673def p_keyword_arg_list_1(t):
674 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
675 t[0] = t[1]
676 t[0].update(t[3])
677
678def p_keyword_arg(t):
679 'keyword_arg : ID EQUALS expr'
680 t[0] = { t[1] : t[3] }
681
682#
683# Basic expressions. These constitute the argument values of
684# "function calls" (i.e. instruction definitions in the decode block)
685# and default values for formal parameters of format functions.
686#
687# Right now, these are either strings, integers, or (recursively)
688# lists of exprs (using Python square-bracket list syntax). Note that
689# bare identifiers are treated as string constants here (since there
690# isn't really a variable namespace to refer to).
691#
692def p_expr_0(t):
693 '''expr : ID
694 | INTLIT
695 | STRLIT
696 | CODELIT'''
697 t[0] = t[1]
698
699def p_expr_1(t):
700 '''expr : LBRACKET list_expr RBRACKET'''
701 t[0] = t[2]
702
703def p_list_expr_0(t):
704 'list_expr : expr'
705 t[0] = [t[1]]
706
707def p_list_expr_1(t):
708 'list_expr : list_expr COMMA expr'
709 t[0] = t[1] + [t[3]]
710
711def p_list_expr_2(t):
712 'list_expr : empty'
713 t[0] = []
714
715#
716# Empty production... use in other rules for readability.
717#
718def p_empty(t):
719 'empty :'
720 pass
721
722# Parse error handler. Note that the argument here is the offending
723# *token*, not a grammar symbol (hence the need to use t.value)
724def p_error(t):
725 if t:
726 error(t.lineno, "syntax error at '%s'" % t.value)
727 else:
728 error(0, "unknown syntax error", True)
729
730# END OF GRAMMAR RULES
731#
732# Now build the parser.
733parser = yacc.yacc()
734
735
736#####################################################################
737#
738# Support Classes
739#
740#####################################################################
741
742# Expand template with CPU-specific references into a dictionary with
743# an entry for each CPU model name. The entry key is the model name
744# and the corresponding value is the template with the CPU-specific
745# refs substituted for that model.
746def expand_cpu_symbols_to_dict(template):
747 # Protect '%'s that don't go with CPU-specific terms
748 t = re.sub(r'%(?!\(CPU_)', '%%', template)
749 result = {}
750 for cpu in cpu_models:
751 result[cpu.name] = t % cpu.strings
752 return result
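
# Illustrative only: the model names and the substituted strings depend
# on the build's cpu_models list.  Roughly,
#
#   expand_cpu_symbols_to_dict('void foo(%(CPU_exec_context)s *xc);')
#
# returns something like
#
#   { 'SimpleCPU': 'void foo(<SimpleCPU exec context type> *xc);',
#     'O3CPU':     'void foo(<O3CPU exec context type> *xc);' }
#
# with any other '%' in the template doubled by the re.sub above.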
753
754# *If* the template has CPU-specific references, return a single
755# string containing a copy of the template for each CPU model with the
756# corresponding values substituted in. If the template has no
757# CPU-specific references, it is returned unmodified.
758def expand_cpu_symbols_to_string(template):
759 if template.find('%(CPU_') != -1:
760 return reduce(lambda x,y: x+y,
761 expand_cpu_symbols_to_dict(template).values())
762 else:
763 return template
764
765# Protect CPU-specific references by doubling the corresponding '%'s
766# (in preparation for substituting a different set of references into
767# the template).
768def protect_cpu_symbols(template):
769 return re.sub(r'%(?=\(CPU_)', '%%', template)
770
771###############
772# GenCode class
773#
774# The GenCode class encapsulates generated code destined for various
775# output files. The header_output and decoder_output attributes are
776# strings containing code destined for decoder.hh and decoder.cc
777# respectively. The decode_block attribute contains code to be
778# incorporated in the decode function itself (that will also end up in
779# decoder.cc). The exec_output attribute is a dictionary with a key
780# for each CPU model name; the value associated with a particular key
781# is the string of code for that CPU model's exec.cc file. The
782# has_decode_default attribute is used in the decode block to allow
783# explicit default clauses to override default default clauses.
784
785class GenCode:
786 # Constructor. At this point we substitute out all CPU-specific
787 # symbols. For the exec output, these go into the per-model
788 # dictionary. For all other output types they get collapsed into
789 # a single string.
790 def __init__(self,
791 header_output = '', decoder_output = '', exec_output = '',
792 decode_block = '', has_decode_default = False):
793 self.header_output = expand_cpu_symbols_to_string(header_output)
794 self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
795 if isinstance(exec_output, dict):
796 self.exec_output = exec_output
797 elif isinstance(exec_output, str):
798 # If the exec_output arg is a single string, we replicate
799 # it for each of the CPU models, substituting and
800 # %(CPU_foo)s params appropriately.
801 self.exec_output = expand_cpu_symbols_to_dict(exec_output)
802 self.decode_block = expand_cpu_symbols_to_string(decode_block)
803 self.has_decode_default = has_decode_default
804
805 # Override '+' operator: generate a new GenCode object that
806 # concatenates all the individual strings in the operands.
807 def __add__(self, other):
808 exec_output = {}
809 for cpu in cpu_models:
810 n = cpu.name
811 exec_output[n] = self.exec_output[n] + other.exec_output[n]
812 return GenCode(self.header_output + other.header_output,
813 self.decoder_output + other.decoder_output,
814 exec_output,
815 self.decode_block + other.decode_block,
816 self.has_decode_default or other.has_decode_default)
817
818 # Prepend a string (typically a comment) to all the strings.
819 def prepend_all(self, pre):
820 self.header_output = pre + self.header_output
821 self.decoder_output = pre + self.decoder_output
822 self.decode_block = pre + self.decode_block
823 for cpu in cpu_models:
824 self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]
825
826 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
827 # and 'break;'). Used to build the big nested switch statement.
828 def wrap_decode_block(self, pre, post = ''):
829 self.decode_block = pre + indent(self.decode_block) + post
830
831################
832# Format object.
833#
834# A format object encapsulates an instruction format. It must provide
835# a defineInst() method that generates the code for an instruction
836# definition.
837
838exportContextSymbols = ('InstObjParams', 'makeList', 're', 'string')
839
840exportContext = {}
841
842def updateExportContext():
843 exportContext.update(exportDict(*exportContextSymbols))
844 exportContext.update(templateMap)
845
846def exportDict(*symNames):
847 return dict([(s, eval(s)) for s in symNames])
848
849
850class Format:
851 def __init__(self, id, params, code):
852 # constructor: just save away arguments
853 self.id = id
854 self.params = params
855 label = 'def format ' + id
856 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
857 param_list = string.join(params, ", ")
858 f = '''def defInst(_code, _context, %s):
859 my_locals = vars().copy()
860 exec _code in _context, my_locals
861 return my_locals\n''' % param_list
862 c = compile(f, label + ' wrapper', 'exec')
863 exec c
864 self.func = defInst
865
866 def defineInst(self, name, args, lineno):
867 context = {}
868 updateExportContext()
869 context.update(exportContext)
870 if len(name):
871 Name = name[0].upper()
872 if len(name) > 1:
873 Name += name[1:]
874 context.update({ 'name': name, 'Name': Name })
875 try:
876 vars = self.func(self.user_code, context, *args[0], **args[1])
877 except Exception, exc:
878 error(lineno, 'error defining "%s": %s.' % (name, exc))
879 for k in vars.keys():
880 if k not in ('header_output', 'decoder_output',
881 'exec_output', 'decode_block'):
882 del vars[k]
883 return GenCode(**vars)
884
885# Special null format to catch an implicit-format instruction
886# definition outside of any format block.
887class NoFormat:
888 def __init__(self):
889 self.defaultInst = ''
890
891 def defineInst(self, name, args, lineno):
892 error(lineno,
893 'instruction definition "%s" with no active format!' % name)
894
895# This dictionary maps format name strings to Format objects.
896formatMap = {}
897
898# Define a new format
899def defFormat(id, params, code, lineno):
900 # make sure we haven't already defined this one
901 if formatMap.get(id, None) != None:
902 error(lineno, 'format %s redefined.' % id)
903 # create new object and store in global map
904 formatMap[id] = Format(id, params, code)
905
906
907##############
908# Stack: a simple stack object. Used for both formats (formatStack)
909# and default cases (defaultStack). Simply wraps a list to give more
910# stack-like syntax and enable initialization with an argument list
911# (as opposed to an argument that's a list).
912
913class Stack(list):
914 def __init__(self, *items):
915 list.__init__(self, items)
916
917 def push(self, item):
918 self.append(item);
919
920 def top(self):
921 return self[-1]
922
923# The global format stack.
924formatStack = Stack(NoFormat())
925
926# The global default case stack.
927defaultStack = Stack( None )
928
929# Global stack that tracks current file and line number.
930# Each element is a tuple (filename, lineno) that records the
931# *current* filename and the line number in the *previous* file where
932# it was included.
933fileNameStack = Stack()
934
935###################
936# Utility functions
937
938#
939# Indent every line in string 's' by two spaces
940# (except preprocessor directives).
941# Used to make nested code blocks look pretty.
942#
943def indent(s):
944 return re.sub(r'(?m)^(?!#)', ' ', s)
945
946#
947# Munge a somewhat arbitrarily formatted piece of Python code
948# (e.g. from a format 'let' block) into something whose indentation
949# will get by the Python parser.
950#
951# The two keys here are that Python will give a syntax error if
952# there's any whitespace at the beginning of the first line, and that
953# all lines at the same lexical nesting level must have identical
954# indentation. Unfortunately the way code literals work, an entire
955# let block tends to have some initial indentation. Rather than
956# trying to figure out what that is and strip it off, we prepend 'if
957# 1:' to make the let code the nested block inside the if (and have
958# the parser automatically deal with the indentation for us).
959#
960# We don't want to do this if (1) the code block is empty or (2) the
961# first line of the block doesn't have any whitespace at the front.
962
963def fixPythonIndentation(s):
964 # get rid of blank lines first
965 s = re.sub(r'(?m)^\s*\n', '', s);
966 if (s != '' and re.match(r'[ \t]', s[0])):
967 s = 'if 1:\n' + s
968 return s
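
# Sketch of the effect: a let body that arrives with leading
# indentation, e.g.
#
#       x = 1
#       y = 2
#
# is turned into
#
#   if 1:
#       x = 1
#       y = 2
#
# which the Python parser accepts without us having to strip the common
# leading whitespace ourselves.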
969
970# Error handler. Just call exit. Output formatted to work under
971# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
972# prints a Python stack backtrace too (can be handy when trying to
973# debug the parser itself).
974def error(lineno, string, print_traceback = False):
975 spaces = ""
976 for (filename, line) in fileNameStack[0:-1]:
977 print spaces + "In file included from " + filename + ":"
978 spaces += " "
979 # Print a Python stack backtrace if requested.
980 if (print_traceback):
981 traceback.print_exc()
982 if lineno != 0:
983 line_str = "%d:" % lineno
984 else:
985 line_str = ""
986 sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))
987
988
989#####################################################################
990#
991# Bitfield Operator Support
992#
993#####################################################################
994
995bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
996
997bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
998bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
999
1000def substBitOps(code):
1001 # first convert single-bit selectors to two-index form
1002 # i.e., <n> --> <n:n>
1003 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
1004 # simple case: selector applied to ID (name)
1005 # i.e., foo<a:b> --> bits(foo, a, b)
1006 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
1007 # if selector is applied to expression (ending in ')'),
1008 # we need to search backward for matching '('
1009 match = bitOpExprRE.search(code)
1010 while match:
1011 exprEnd = match.start()
1012 here = exprEnd - 1
1013 nestLevel = 1
1014 while nestLevel > 0:
1015 if code[here] == '(':
1016 nestLevel -= 1
1017 elif code[here] == ')':
1018 nestLevel += 1
1019 here -= 1
1020 if here < 0:
1021 sys.exit("Didn't find '('!")
1022 exprStart = here+1
1023 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
1024 match.group(1), match.group(2))
1025 code = code[:exprStart] + newExpr + code[match.end():]
1026 match = bitOpExprRE.search(code)
1027 return code
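
# Illustrative expansion (operand names are arbitrary):
#
#   substBitOps('Ra<7:0> + (Rb + Rc)<3:2>')
#       --> 'bits(Ra, 7, 0) + bits((Rb + Rc), 3, 2)'
#
# Single-bit selectors are first rewritten to the two-index <n:n> form,
# selectors on plain names become bits() calls directly, and selectors
# on parenthesized expressions are handled by scanning back to the
# matching '('.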
1028
1029
1030####################
1031# Template objects.
1032#
1033# Template objects are format strings that allow substitution from
1034# the attribute spaces of other objects (e.g. InstObjParams instances).
1035
1036labelRE = re.compile(r'(?<!%)%\(([^\)]+)\)[sd]')
1037
1038class Template:
1039 def __init__(self, t):
1040 self.template = t
1041
1042 def subst(self, d):
1043 myDict = None
1044
1045 # Protect non-Python-dict substitutions (e.g. if there's a printf
1046 # in the templated C++ code)
1047 template = protect_non_subst_percents(self.template)
1048 # CPU-model-specific substitutions are handled later (in GenCode).
1049 template = protect_cpu_symbols(template)
1050
1051 # Build a dict ('myDict') to use for the template substitution.
1052 # Start with the template namespace. Make a copy since we're
1053 # going to modify it.
1054 myDict = templateMap.copy()
1055
1056 if isinstance(d, InstObjParams):
1057 # If we're dealing with an InstObjParams object, we need
1058 # to be a little more sophisticated. The instruction-wide
1059 # parameters are already formed, but the parameters which
1060 # are only function wide still need to be generated.
1061 compositeCode = ''
1062
1063 myDict.update(d.__dict__)
1064 # The "operands" and "snippets" attributes of the InstObjParams
1065 # objects are for internal use and not substitution.
1066 del myDict['operands']
1067 del myDict['snippets']
1068
1069 snippetLabels = [l for l in labelRE.findall(template)
1070 if d.snippets.has_key(l)]
1071
1072 snippets = dict([(s, mungeSnippet(d.snippets[s]))
1073 for s in snippetLabels])
1074
1075 myDict.update(snippets)
1076
1077 compositeCode = ' '.join(map(str, snippets.values()))
1078
1079 # Add in template itself in case it references any
1080 # operands explicitly (like Mem)
1081 compositeCode += ' ' + template
1082
1083 operands = SubOperandList(compositeCode, d.operands)
1084
1085 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
1086
1087 is_src = lambda op: op.is_src
1088 is_dest = lambda op: op.is_dest
1089
1090 myDict['op_src_decl'] = \
1091 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1092 myDict['op_dest_decl'] = \
1093 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1094
1095 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
1096 myDict['op_wb'] = operands.concatAttrStrings('op_wb')
1097
1098 if d.operands.memOperand:
1099 myDict['mem_acc_size'] = d.operands.memOperand.mem_acc_size
1100 myDict['mem_acc_type'] = d.operands.memOperand.mem_acc_type
1101
1102 elif isinstance(d, dict):
1103 # if the argument is a dictionary, we just use it.
1104 myDict.update(d)
1105 elif hasattr(d, '__dict__'):
1106 # if the argument is an object, we use its attribute map.
1107 myDict.update(d.__dict__)
1108 else:
1109 raise TypeError, "Template.subst() arg must be or have a dictionary"
1110 return template % myDict
1111
1112 # Convert to string. This handles the case when a template with a
1113 # CPU-specific term gets interpolated into another template or into
1114 # an output block.
1115 def __str__(self):
1116 return expand_cpu_symbols_to_string(self.template)
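
# Usage sketch (the template name, its body, and 'iop' are illustrative
# rather than taken from a real ISA description):
#
#   def template BasicDeclare {{
#       class %(class_name)s : public %(base_class)s { ... };
#   }};
#
# makes 'BasicDeclare' available in let/format code, where something
# like
#
#   header_output = BasicDeclare.subst(iop)
#
# (with 'iop' an InstObjParams instance) merges templateMap, the
# instruction's attributes, and any referenced snippets into the
# dictionary used for the final '%' substitution.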
1117
1118#####################################################################
1119#
1120# Code Parser
1121#
1122# The remaining code is the support for automatically extracting
1123# instruction characteristics from pseudocode.
1124#
1125#####################################################################
1126
1127# Force the argument to be a list. Useful for flags, where a caller
1128# can specify a singleton flag or a list of flags. Also useful for
1129# converting tuples to lists so they can be modified.
1130def makeList(arg):
1131 if isinstance(arg, list):
1132 return arg
1133 elif isinstance(arg, tuple):
1134 return list(arg)
1135 elif not arg:
1136 return []
1137 else:
1138 return [ arg ]
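
# Behavior sketch (flag names are just examples):
#
#   makeList('IsInteger')            --> ['IsInteger']
#   makeList(('IsLoad', 'IsStore'))  --> ['IsLoad', 'IsStore']
#   makeList([])                     --> []
#   makeList(None)                   --> []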
1139
1140# Generate operandTypeMap from the user's 'def operand_types'
1141# statement.
1142def buildOperandTypeMap(userDict, lineno):
1143 global operandTypeMap
1144 operandTypeMap = {}
1145 for (ext, (desc, size)) in userDict.iteritems():
1146 if desc == 'signed int':
1147 ctype = 'int%d_t' % size
1148 is_signed = 1
1149 elif desc == 'unsigned int':
1150 ctype = 'uint%d_t' % size
1151 is_signed = 0
1152 elif desc == 'float':
1153 is_signed = 1 # shouldn't really matter
1154 if size == 32:
1155 ctype = 'float'
1156 elif size == 64:
1157 ctype = 'double'
1158 elif desc == 'twin64 int':
1159 is_signed = 0
1160 ctype = 'Twin64_t'
1161 elif desc == 'twin32 int':
1162 is_signed = 0
1163 ctype = 'Twin32_t'
1164 if ctype == '':
1165 error(lineno, 'Unrecognized type description "%s" in userDict' % desc)
1166 operandTypeMap[ext] = (size, ctype, is_signed)
1167
1168#
1169#
1170#
1171# Base class for operand descriptors. An instance of this class (or
1172# actually a class derived from this one) represents a specific
1173# operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
1174# derived classes encapsulate the traits of a particular operand type
1175# (e.g., "32-bit integer register").
1176#
1177class Operand(object):
1178 def __init__(self, full_name, ext, is_src, is_dest):
1179 self.full_name = full_name
1180 self.ext = ext
1181 self.is_src = is_src
1182 self.is_dest = is_dest
1183 # The 'effective extension' (eff_ext) is either the actual
1184 # extension, if one was explicitly provided, or the default.
1185 if ext:
1186 self.eff_ext = ext
1187 else:
1188 self.eff_ext = self.dflt_ext
1189
1190 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1191
1192 # note that mem_acc_size is undefined for non-mem operands...
1193 # template must be careful not to use it if it doesn't apply.
1194 if self.isMem():
1195 self.mem_acc_size = self.makeAccSize()
1196 if self.ctype in ['Twin32_t', 'Twin64_t']:
1197 self.mem_acc_type = 'Twin'
1198 else:
1199 self.mem_acc_type = 'uint'
1200
1201 # Finalize additional fields (primarily code fields). This step
1202 # is done separately since some of these fields may depend on the
1203 # register index enumeration that hasn't been performed yet at the
1204 # time of __init__().
1205 def finalize(self):
1206 self.flags = self.getFlags()
1207 self.constructor = self.makeConstructor()
1208 self.op_decl = self.makeDecl()
1209
1210 if self.is_src:
1211 self.op_rd = self.makeRead()
1212 self.op_src_decl = self.makeDecl()
1213 else:
1214 self.op_rd = ''
1215 self.op_src_decl = ''
1216
1217 if self.is_dest:
1218 self.op_wb = self.makeWrite()
1219 self.op_dest_decl = self.makeDecl()
1220 else:
1221 self.op_wb = ''
1222 self.op_dest_decl = ''
1223
1224 def isMem(self):
1225 return 0
1226
1227 def isReg(self):
1228 return 0
1229
1230 def isFloatReg(self):
1231 return 0
1232
1233 def isIntReg(self):
1234 return 0
1235
1236 def isControlReg(self):
1237 return 0
1238
1239 def getFlags(self):
1240 # note the empty slice '[:]' gives us a copy of self.flags[0]
1241 # instead of a reference to it
1242 my_flags = self.flags[0][:]
1243 if self.is_src:
1244 my_flags += self.flags[1]
1245 if self.is_dest:
1246 my_flags += self.flags[2]
1247 return my_flags
1248
1249 def makeDecl(self):
1250 # Note that initializations in the declarations are solely
1251 # to avoid 'uninitialized variable' errors from the compiler.
1252 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1253
1254class IntRegOperand(Operand):
1255 def isReg(self):
1256 return 1
1257
1258 def isIntReg(self):
1259 return 1
1260
1261 def makeConstructor(self):
1262 c = ''
1263 if self.is_src:
1264 c += '\n\t_srcRegIdx[%d] = %s;' % \
1265 (self.src_reg_idx, self.reg_spec)
1266 if self.is_dest:
1267 c += '\n\t_destRegIdx[%d] = %s;' % \
1268 (self.dest_reg_idx, self.reg_spec)
1269 return c
1270
1271 def makeRead(self):
1272 if (self.ctype == 'float' or self.ctype == 'double'):
1273 error(0, 'Attempt to read integer register as FP')
1274 if (self.size == self.dflt_size):
1275 return '%s = xc->readIntRegOperand(this, %d);\n' % \
1276 (self.base_name, self.src_reg_idx)
1277 elif (self.size > self.dflt_size):
1278 int_reg_val = 'xc->readIntRegOperand(this, %d)' % \
1279 (self.src_reg_idx)
1280 if (self.is_signed):
1281 int_reg_val = 'sext<%d>(%s)' % (self.dflt_size, int_reg_val)
1282 return '%s = %s;\n' % (self.base_name, int_reg_val)
1283 else:
1284 return '%s = bits(xc->readIntRegOperand(this, %d), %d, 0);\n' % \
1285 (self.base_name, self.src_reg_idx, self.size-1)
1286
1287 def makeWrite(self):
1288 if (self.ctype == 'float' or self.ctype == 'double'):
1289 error(0, 'Attempt to write integer register as FP')
1290 if (self.size != self.dflt_size and self.is_signed):
1291 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1292 else:
1293 final_val = self.base_name
1294 wb = '''
1295 {
1296 %s final_val = %s;
1297 xc->setIntRegOperand(this, %d, final_val);\n
1298 if (traceData) { traceData->setData(final_val); }
1299 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1300 return wb
1301
1302class FloatRegOperand(Operand):
1303 def isReg(self):
1304 return 1
1305
1306 def isFloatReg(self):
1307 return 1
1308
1309 def makeConstructor(self):
1310 c = ''
1311 if self.is_src:
1312 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1313 (self.src_reg_idx, self.reg_spec)
1314 if self.is_dest:
1315 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1316 (self.dest_reg_idx, self.reg_spec)
1317 return c
1318
1319 def makeRead(self):
1320 bit_select = 0
1321 width = 0;
1322 if (self.ctype == 'float'):
1323 func = 'readFloatRegOperand'
1324 width = 32;
1325 elif (self.ctype == 'double'):
1326 func = 'readFloatRegOperand'
1327 width = 64;
1328 else:
1329 func = 'readFloatRegOperandBits'
1330 if (self.ctype == 'uint32_t'):
1331 width = 32;
1332 elif (self.ctype == 'uint64_t'):
1333 width = 64;
1334 if (self.size != self.dflt_size):
1335 bit_select = 1
1336 if width:
1337 base = 'xc->%s(this, %d, %d)' % \
1338 (func, self.src_reg_idx, width)
1339 else:
1340 base = 'xc->%s(this, %d)' % \
1341 (func, self.src_reg_idx)
1342 if bit_select:
1343 return '%s = bits(%s, %d, 0);\n' % \
1344 (self.base_name, base, self.size-1)
1345 else:
1346 return '%s = %s;\n' % (self.base_name, base)
1347
1348 def makeWrite(self):
1349 final_val = self.base_name
1350 final_ctype = self.ctype
1351 widthSpecifier = ''
1352 width = 0
1353 if (self.ctype == 'float'):
1354 width = 32
1355 func = 'setFloatRegOperand'
1356 elif (self.ctype == 'double'):
1357 width = 64
1358 func = 'setFloatRegOperand'
1359 elif (self.ctype == 'uint32_t'):
1360 func = 'setFloatRegOperandBits'
1361 width = 32
1362 elif (self.ctype == 'uint64_t'):
1363 func = 'setFloatRegOperandBits'
1364 width = 64
1365 else:
1366 func = 'setFloatRegOperandBits'
1367 final_ctype = 'uint%d_t' % self.dflt_size
1368 if (self.size != self.dflt_size and self.is_signed):
1369 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1370 if width:
1371 widthSpecifier = ', %d' % width
1372 wb = '''
1373 {
1374 %s final_val = %s;
1375 xc->%s(this, %d, final_val%s);\n
1376 if (traceData) { traceData->setData(final_val); }
1377 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1378 widthSpecifier)
1379 return wb
1380
1381class ControlRegOperand(Operand):
1382 def isReg(self):
1383 return 1
1384
1385 def isControlReg(self):
1386 return 1
1387
1388 def makeConstructor(self):
1389 c = ''
1390 if self.is_src:
1391 c += '\n\t_srcRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1392 (self.src_reg_idx, self.reg_spec)
1393 if self.is_dest:
1394 c += '\n\t_destRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1395 (self.dest_reg_idx, self.reg_spec)
1396 return c
1397
1398 def makeRead(self):
1399 bit_select = 0
1400 if (self.ctype == 'float' or self.ctype == 'double'):
1401 error(0, 'Attempt to read control register as FP')
1402 base = 'xc->readMiscRegOperand(this, %s)' % self.src_reg_idx
1403 if self.size == self.dflt_size:
1404 return '%s = %s;\n' % (self.base_name, base)
1405 else:
1406 return '%s = bits(%s, %d, 0);\n' % \
1407 (self.base_name, base, self.size-1)
1408
1409 def makeWrite(self):
1410 if (self.ctype == 'float' or self.ctype == 'double'):
1411 error(0, 'Attempt to write control register as FP')
1412 wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
1413 (self.dest_reg_idx, self.base_name)
1413 return wb
1414
1415class ControlBitfieldOperand(ControlRegOperand):
1416 def makeRead(self):
1417 bit_select = 0
1418 if (self.ctype == 'float' or self.ctype == 'double'):
1419 error(0, 'Attempt to read control register as FP')
1420 base = 'xc->readMiscReg(%s)' % self.reg_spec
1421 name = self.base_name
1422 return '%s = bits(%s, %s_HI, %s_LO);' % \
1423 (name, base, name, name)
1424
1425 def makeWrite(self):
1426 if (self.ctype == 'float' or self.ctype == 'double'):
1427 error(0, 'Attempt to write control register as FP')
1428 base = 'xc->readMiscReg(%s)' % self.reg_spec
1429 name = self.base_name
1430 wb_val = 'insertBits(%s, %s_HI, %s_LO, %s)' % \
1431 (base, name, name, self.base_name)
1432 wb = 'xc->setMiscRegOperand(this, %s, %s );\n' % (self.dest_reg_idx, wb_val)
1433 wb += 'if (traceData) { traceData->setData(%s); }' % \
1434 self.base_name
1435 return wb
1436
1437class MemOperand(Operand):
1438 def isMem(self):
1439 return 1
1440
1441 def makeConstructor(self):
1442 return ''
1443
1444 def makeDecl(self):
1445 # Note that initializations in the declarations are solely
1446 # to avoid 'uninitialized variable' errors from the compiler.
1447 # Declare memory data variable.
1448 if self.ctype in ['Twin32_t','Twin64_t']:
1449 return "%s %s; %s.a = 0; %s.b = 0;\n" % (self.ctype, self.base_name,
1450 self.base_name, self.base_name)
1451 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1452 return c
1453
1454 def makeRead(self):
1455 return ''
1456
1457 def makeWrite(self):
1458 return ''
1459
1460 # Return the memory access size *in bits*, suitable for
1461 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1462 def makeAccSize(self):
1463 return self.size
1464
1465
1466class NPCOperand(Operand):
1467 def makeConstructor(self):
1468 return ''
1469
1470 def makeRead(self):
1471 return '%s = xc->readNextPC();\n' % self.base_name
1472
1473 def makeWrite(self):
1474 return 'xc->setNextPC(%s);\n' % self.base_name
1475
1476class NNPCOperand(Operand):
1477 def makeConstructor(self):
1478 return ''
1479
1480 def makeRead(self):
1481 return '%s = xc->readNextNPC();\n' % self.base_name
1482
1483 def makeWrite(self):
1484 return 'xc->setNextNPC(%s);\n' % self.base_name
1485
1486def buildOperandNameMap(userDict, lineno):
1487 global operandNameMap
1488 operandNameMap = {}
1489 for (op_name, val) in userDict.iteritems():
1490 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1491 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1492 # Canonical flag structure is a triple of lists, where each list
1493 # indicates the set of flags implied by this operand always, when
1494 # used as a source, and when used as a dest, respectively.
1495 # For simplicity this can be initialized using a variety of fairly
1496 # obvious shortcuts; we convert these to canonical form here.
1497 if not flags:
1498 # no flags specified (e.g., 'None')
1499 flags = ( [], [], [] )
1500 elif isinstance(flags, str):
1501 # a single flag: assumed to be unconditional
1502 flags = ( [ flags ], [], [] )
1503 elif isinstance(flags, list):
1504 # a list of flags: also assumed to be unconditional
1505 flags = ( flags, [], [] )
1506 elif isinstance(flags, tuple):
1507 # it's a tuple: it should be a triple,
1508 # but each item could be a single string or a list
1509 (uncond_flags, src_flags, dest_flags) = flags
1510 flags = (makeList(uncond_flags),
1511 makeList(src_flags), makeList(dest_flags))
1512 # Accumulate attributes of new operand class in tmp_dict
1513 tmp_dict = {}
1514 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1515 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1516 tmp_dict[attr] = eval(attr)
1517 tmp_dict['base_name'] = op_name
1518 # New class name will be e.g. "IntReg_Ra"
1519 cls_name = base_cls_name + '_' + op_name
1520 # Evaluate string arg to get class object. Note that the
1521 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1522 # have to append "Operand".
1523 try:
1524 base_cls = eval(base_cls_name + 'Operand')
1525 except NameError:
1526 error(lineno,
1527 'error: unknown operand base class "%s"' % base_cls_name)
1528 # The following statement creates a new class called
1529 # <cls_name> as a subclass of <base_cls> with the attributes
1530 # in tmp_dict, just as if we evaluated a class declaration.
1531 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
1532
1533 # Define operand variables.
1534 operands = userDict.keys()
1535
1536 operandsREString = (r'''
1537 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1538 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1539 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1540 '''
1541 % string.join(operands, '|'))
1542
1543 global operandsRE
1544 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
1545
1546 # Same as operandsREString, but extension is mandatory, and only two
1547 # groups are returned (base and ext, not full name as above).
1548# Used for substituting '_' for '.' to make C++ identifiers.
1549 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1550 % string.join(operands, '|'))
1551
1552 global operandsWithExtRE
1553 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1554
1555
1556class OperandList:
1557
1558 # Find all the operands in the given code block. Returns an operand
1559 # descriptor list (instance of class OperandList).
1560 def __init__(self, code):
1561 self.items = []
1562 self.bases = {}
1563 # delete comments so we don't match on reg specifiers inside
1564 code = commentRE.sub('', code)
1565 # search for operands
1566 next_pos = 0
1567 while 1:
1568 match = operandsRE.search(code, next_pos)
1569 if not match:
1570 # no more matches: we're done
1571 break
1572 op = match.groups()
1573 # regexp groups are operand full name, base, and extension
1574 (op_full, op_base, op_ext) = op
1575 # if the token following the operand is an assignment, this is
1576 # a destination (LHS), else it's a source (RHS)
1577 is_dest = (assignRE.match(code, match.end()) != None)
1578 is_src = not is_dest
1579 # see if we've already seen this one
1580 op_desc = self.find_base(op_base)
1581 if op_desc:
1582 if op_desc.ext != op_ext:
1583 error(0, 'Inconsistent extensions for operand %s' % \
1584 op_base)
1585 op_desc.is_src = op_desc.is_src or is_src
1586 op_desc.is_dest = op_desc.is_dest or is_dest
1587 else:
1588 # new operand: create new descriptor
1589 op_desc = operandNameMap[op_base](op_full, op_ext,
1590 is_src, is_dest)
1591 self.append(op_desc)
1592 # start next search after end of current match
1593 next_pos = match.end()
1594 self.sort()
1595 # enumerate source & dest register operands... used in building
1596 # constructor later
1597 self.numSrcRegs = 0
1598 self.numDestRegs = 0
1599 self.numFPDestRegs = 0
1600 self.numIntDestRegs = 0
1601 self.memOperand = None
1602 for op_desc in self.items:
1603 if op_desc.isReg():
1604 if op_desc.is_src:
1605 op_desc.src_reg_idx = self.numSrcRegs
1606 self.numSrcRegs += 1
1607 if op_desc.is_dest:
1608 op_desc.dest_reg_idx = self.numDestRegs
1609 self.numDestRegs += 1
1610 if op_desc.isFloatReg():
1611 self.numFPDestRegs += 1
1612 elif op_desc.isIntReg():
1613 self.numIntDestRegs += 1
1614 elif op_desc.isMem():
1615 if self.memOperand:
1616 error(0, "Code block has more than one memory operand.")
1617 self.memOperand = op_desc
1618 # now make a final pass to finalize op_desc fields that may depend
1619 # on the register enumeration
1620 for op_desc in self.items:
1621 op_desc.finalize()

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]

    def append(self, op_desc):
        self.items.append(op_desc)
        self.bases[op_desc.base_name] = op_desc

    def find_base(self, base_name):
        # like self.bases[base_name], but returns None if not found
        # (rather than raising exception)
        return self.bases.get(base_name)

    # internal helper function for concat[Some]Attr{Strings|Lists}
    def __internalConcatAttrs(self, attr_name, filter, result):
        for op_desc in self.items:
            if filter(op_desc):
                result += getattr(op_desc, attr_name)
        return result

    # return a single string that is the concatenation of the (string)
    # values of the specified attribute for all operands
    def concatAttrStrings(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, '')

    # like concatAttrStrings, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrStrings(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, '')

    # return a single list that is the concatenation of the (list)
    # values of the specified attribute for all operands
    def concatAttrLists(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, [])

    # like concatAttrLists, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrLists(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, [])

    def sort(self):
        self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)

class SubOperandList(OperandList):

    # Find all the operands in the given code block.  Returns an operand
    # descriptor list (instance of class OperandList).
    def __init__(self, code, master_list):
        self.items = []
        self.bases = {}
        # delete comments so we don't match on reg specifiers inside
        code = commentRE.sub('', code)
        # search for operands
        next_pos = 0
        while 1:
            match = operandsRE.search(code, next_pos)
            if not match:
                # no more matches: we're done
                break
            op = match.groups()
            # regexp groups are operand full name, base, and extension
            (op_full, op_base, op_ext) = op
            # find this op in the master list
            op_desc = master_list.find_base(op_base)
            if not op_desc:
                error(0, 'Found operand %s which is not in the master list!' \
                      ' This is an internal error' % \
                      op_base)
            else:
                # See if we've already found this operand
                op_desc = self.find_base(op_base)
                if not op_desc:
                    # if not, add a reference to it to this sub list
                    self.append(master_list.bases[op_base])

            # start next search after end of current match
            next_pos = match.end()
        self.sort()
        self.memOperand = None
        for op_desc in self.items:
            if op_desc.isMem():
                if self.memOperand:
                    error(0, "Code block has more than one memory operand.")
                self.memOperand = op_desc
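    # Note that a SubOperandList never creates new operand descriptors: it
    # only collects references to descriptors already built by the master
    # OperandList, so per-snippet scans (presumably, e.g., the separate
    # effective-address and memory-access code of a load/store format)
    # share the master list's register index assignments.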

# Regular expression object to match C++ comments
# (used in findOperands())
commentRE = re.compile(r'//.*\n')
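# (Note: only C++ '//' line comments are stripped; a '/* ... */' block
# comment in a snippet would not be removed by this regexp.)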

# Regular expression object to match assignment statements
# (used in findOperands())
assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
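# (The negative lookahead '(?!=)' keeps comparisons like 'Ra == 0' from
# being treated as assignments, so Ra there is still classified as a
# source rather than a destination.)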

# Munge operand names in code string to make legal C++ variable names.
# This means getting rid of the type extension if any.
# (Will match base_name attribute of Operand object.)
def substMungedOpNames(code):
    return operandsWithExtRE.sub(r'\1', code)
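# For example (hypothetical operand name and extension), a reference such
# as 'Fa.sf' in a snippet would be rewritten to just 'Fa', i.e. the
# operand's base_name.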

# Fix up code snippets for final substitution in templates.
def mungeSnippet(s):
    if isinstance(s, str):
        return substMungedOpNames(substBitOps(s))
    else:
        return s

def makeFlagConstructor(flag_list):
    if len(flag_list) == 0:
        return ''
    # filter out repeated flags
    flag_list.sort()
    i = 1
    while i < len(flag_list):
        if flag_list[i] == flag_list[i-1]:
            del flag_list[i]
        else:
            i += 1
    pre = '\n\tflags['
    post = '] = true;'
    code = pre + string.join(flag_list, post + pre) + post
    return code
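# For example, makeFlagConstructor(['IsInteger', 'IsInteger', 'IsMemRef'])
# returns the C++ fragment
#     '\n\tflags[IsInteger] = true;\n\tflags[IsMemRef] = true;'
# with the duplicate 'IsInteger' dropped.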

# Assume all instruction flags are of the form 'IsFoo'
instFlagRE = re.compile(r'Is.*')

# OpClass constants end in 'Op' except No_OpClass
opClassRE = re.compile(r'.*Op|No_OpClass')

class InstObjParams:
    def __init__(self, mnem, class_name, base_class = '',
                 snippets = {}, opt_args = []):
        self.mnemonic = mnem
        self.class_name = class_name
        self.base_class = base_class
        if not isinstance(snippets, dict):
            snippets = {'code' : snippets}
        compositeCode = ' '.join(map(str, snippets.values()))
        self.snippets = snippets

        self.operands = OperandList(compositeCode)
        self.constructor = self.operands.concatAttrStrings('constructor')
        self.constructor += \
            '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
        self.constructor += \
            '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
        self.constructor += \
            '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
        self.constructor += \
            '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
        self.flags = self.operands.concatAttrLists('flags')
        # Make a basic guess on the operand class (function unit type).
        # These are good enough for most cases and can be overridden
        # later via the optional arguments if needed.
        if 'IsStore' in self.flags:
            self.op_class = 'MemWriteOp'
        elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
            self.op_class = 'MemReadOp'
        elif 'IsFloating' in self.flags:
            self.op_class = 'FloatAddOp'
        else:
            self.op_class = 'IntAluOp'

        # Optional arguments are assumed to be either StaticInst flags
        # or an OpClass value.  To avoid having to import a complete
        # list of these values to match against, we do it ad-hoc
        # with regexps.
        for oa in opt_args:
            if instFlagRE.match(oa):
                self.flags.append(oa)
            elif opClassRE.match(oa):
                self.op_class = oa
            else:
                error(0, 'InstObjParams: optional arg "%s" not recognized '
                      'as StaticInst::Flag or OpClass.' % oa)

        # add flag initialization to constructor here to include
        # any flags added via opt_args
        self.constructor += makeFlagConstructor(self.flags)

        # if 'IsFloating' is set, add call to the FP enable check
        # function (which should be provided by isa_desc via a declare)
        if 'IsFloating' in self.flags:
            self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
        else:
            self.fp_enable_check = ''
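        # Illustrative (hypothetical) use from an instruction format
        # definition:
        #
        #     iop = InstObjParams(name, Name, 'AlphaStaticInst',
        #                         {'code': code}, opt_flags)
        #
        # where 'code' is the C++ snippet whose operands are scanned above,
        # and 'opt_flags' may carry extra StaticInst flags (e.g. 'IsInteger')
        # or an OpClass override (e.g. 'IntMultOp').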

#######################
#
# Output file template
#

file_template = '''
/*
 * DO NOT EDIT THIS FILE!!!
 *
 * It was automatically generated from the ISA description in %(filename)s
 */

%(includes)s

%(global_output)s

namespace %(namespace)s {

%(namespace_output)s

} // namespace %(namespace)s

%(decode_function)s
'''
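# The %(...)s keys above are filled in via 'file_template % vars()' in
# parse_isa_desc() below, which binds them to like-named local variables
# (filename, includes, global_output, namespace, namespace_output,
# decode_function).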


# Update the output file only if the new contents are different from
# the current contents.  Minimizes the files that need to be rebuilt
# after minor changes.
def update_if_needed(file, contents):
    update = False
    if os.access(file, os.R_OK):
        f = open(file, 'r')
        old_contents = f.read()
        f.close()
        if contents != old_contents:
            print 'Updating', file
            os.remove(file)  # in case it's write-protected
            update = True
        else:
            print 'File', file, 'is unchanged'
    else:
        print 'Generating', file
        update = True
    if update:
        f = open(file, 'w')
        f.write(contents)
        f.close()

# This regular expression matches '##include' directives
includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
                       re.MULTILINE)
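# e.g. a line of the form
#     ##include "decoder.isa"
# in an ISA description matches, with the quoted path captured in the
# 'filename' group.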

# Function to replace a matched '##include' directive with the
# contents of the specified file (with nested ##includes replaced
# recursively).  'matchobj' is an re match object (from a match of
# includeRE) and 'dirname' is the directory relative to which the file
# path should be resolved.
def replace_include(matchobj, dirname):
    fname = matchobj.group('filename')
    full_fname = os.path.normpath(os.path.join(dirname, fname))
    contents = '##newfile "%s"\n%s\n##endfile\n' % \
               (full_fname, read_and_flatten(full_fname))
    return contents

# Read a file and recursively flatten nested '##include' files.
def read_and_flatten(filename):
    current_dir = os.path.dirname(filename)
    try:
        contents = open(filename).read()
    except IOError:
        error(0, 'Error including file "%s"' % filename)
    fileNameStack.push((filename, 0))
    # Find any includes and include them
    contents = includeRE.sub(lambda m: replace_include(m, current_dir),
                             contents)
    fileNameStack.pop()
    return contents
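# The net effect: each '##include' directive is replaced inline by the
# included file's (recursively flattened) text, bracketed by
# '##newfile "<path>"' and '##endfile' markers, which the lexer is expected
# to use to keep error messages pointing at the right source file.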

#
# Read in and parse the ISA description.
#
def parse_isa_desc(isa_desc_file, output_dir):
    # Read file and (recursively) all included files into a string.
    # PLY requires that the input be in a single string so we have to
    # do this up front.
    isa_desc = read_and_flatten(isa_desc_file)

    # Initialize filename stack with outer file.
    fileNameStack.push((isa_desc_file, 0))

    # Parse it.
    (isa_name, namespace, global_code, namespace_code) = \
        parser.parse(isa_desc, lexer=lexer)

    # grab the last three path components of isa_desc_file to put in
    # the output
    filename = '/'.join(isa_desc_file.split('/')[-3:])

    # generate decoder.hh
    includes = '#include "base/bitfield.hh" // for bitfield support'
    global_output = global_code.header_output
    namespace_output = namespace_code.header_output
    decode_function = ''
    update_if_needed(output_dir + '/decoder.hh', file_template % vars())

    # generate decoder.cc
    includes = '#include "decoder.hh"'
    global_output = global_code.decoder_output
    namespace_output = namespace_code.decoder_output
    # namespace_output += namespace_code.decode_block
    decode_function = namespace_code.decode_block
    update_if_needed(output_dir + '/decoder.cc', file_template % vars())

    # generate per-cpu exec files
    for cpu in cpu_models:
        includes = '#include "decoder.hh"\n'
        includes += cpu.includes
        global_output = global_code.exec_output[cpu.name]
        namespace_output = namespace_code.exec_output[cpu.name]
        decode_function = ''
        update_if_needed(output_dir + '/' + cpu.filename,
                         file_template % vars())

# global list of CpuModel objects (see cpu_models.py)
cpu_models = []

# Called as script: get args from command line.
# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
if __name__ == '__main__':
    execfile(sys.argv[1])  # read in CpuModel definitions
    cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
    parse_isa_desc(sys.argv[2], sys.argv[3])
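# Example invocation (hypothetical paths and CPU model names):
#
#     python isa_parser.py cpu_models.py arch/alpha/isa/main.isa \
#         build/ALPHA/arch/alpha AtomicSimpleCPU TimingSimpleCPU
#
# which would write decoder.hh, decoder.cc, and one exec file per listed
# CPU model (named by each CpuModel's 'filename' attribute) into the
# output directory.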