# isa_parser.py (revision 4335:56e0cd2d3dad)
# Copyright (c) 2003-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
#          Korey Sewell

import os
import sys
import re
import string
import traceback
# get type names
from types import *

# Prepend the directory where the PLY lex & yacc modules are found
# to the search path. Assumes we're compiling in a subdirectory
# of 'build' in the current tree.
sys.path[0:0] = [os.environ['M5_PLY']]

import lex
import yacc

#####################################################################
#
# Lexer
#
# The PLY lexer module takes two things as input:
#  - A list of token names (the string list 'tokens')
#  - A regular expression describing a match for each token. The
#    regexp for token FOO can be provided in two ways:
#     - as a string variable named t_FOO
#     - as the doc string for a function named t_FOO. In this case,
#       the function is also executed, allowing an action to be
#       associated with each token match.
#
#####################################################################

# Reserved words. These are listed separately as they are matched
# using the same regexp as generic IDs, but distinguished in the
# t_ID() function. The PLY documentation suggests this approach.
reserved = (
    'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
    'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
    'OUTPUT', 'SIGNED', 'TEMPLATE'
    )

# List of tokens. The lex module requires this.
tokens = reserved + (
    # identifier
    'ID',

    # integer literal
    'INTLIT',

    # string literal
    'STRLIT',

    # code literal
    'CODELIT',

    # ( ) [ ] { } < > , ; . : :: *
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'LESS', 'GREATER', 'EQUALS',
    'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
    'ASTERISK',

    # C preprocessor directives
    'CPPDIRECTIVE'

# The following are matched but never returned. commented out to
# suppress PLY warning
    # newfile directive
#    'NEWFILE',

    # endfile directive
#    'ENDFILE'
)

# Regular expressions for token matching
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LESS = r'\<'
t_GREATER = r'\>'
t_EQUALS = r'='
t_COMMA = r','
t_SEMI = r';'
t_DOT = r'\.'
t_COLON = r':'
t_DBLCOLON = r'::'
t_ASTERISK = r'\*'

# Identifiers and reserved words
reserved_map = { }
for r in reserved:
    reserved_map[r.lower()] = r

def t_ID(t):
    r'[A-Za-z_]\w*'
    t.type = reserved_map.get(t.value,'ID')
    return t

# Integer literal
def t_INTLIT(t):
    r'(0x[\da-fA-F]+)|\d+'
    try:
        t.value = int(t.value,0)
    except ValueError:
        error(t.lineno, 'Integer value "%s" too large' % t.value)
        t.value = 0
    return t

# String literal. Note that these use only single quotes, and
# can span multiple lines.
def t_STRLIT(t):
    r"(?m)'([^'])+'"
    # strip off quotes
    t.value = t.value[1:-1]
    t.lineno += t.value.count('\n')
    return t


# "Code literal"... like a string literal, but delimiters are
# '{{' and '}}' so they get formatted nicely under emacs c-mode
def t_CODELIT(t):
    r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
    # strip off {{ & }}
    t.value = t.value[2:-2]
    t.lineno += t.value.count('\n')
    return t

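# For illustration (the block below is a made-up example, not taken from
# any particular ISA description): a code literal as it appears in the
# input looks like
#
#     output header {{
#         const char *example_decl;   // hypothetical contents
#     }};
#
# t_CODELIT strips the '{{' and '}}' delimiters, so t.value holds only
# the text between them.
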
def t_CPPDIRECTIVE(t):
    r'^\#[^\#].*\n'
    t.lineno += t.value.count('\n')
    return t

def t_NEWFILE(t):
    r'^\#\#newfile\s+"[\w/.-]*"'
    fileNameStack.push((t.value[11:-1], t.lineno))
    t.lineno = 0

def t_ENDFILE(t):
    r'^\#\#endfile'
    (old_filename, t.lineno) = fileNameStack.pop()

#
# The functions t_NEWLINE, t_ignore, and t_error are
# special for the lex module.
#

# Newlines
def t_NEWLINE(t):
    r'\n+'
    t.lineno += t.value.count('\n')

# Comments
def t_comment(t):
    r'//.*'

# Completely ignored characters
t_ignore = ' \t\x0c'

# Error handler
def t_error(t):
    error(t.lineno, "illegal character '%s'" % t.value[0])
    t.skip(1)

# Build the lexer
lex.lex()

#####################################################################
#
# Parser
#
# Every function whose name starts with 'p_' defines a grammar rule.
# The rule is encoded in the function's doc string, while the
# function body provides the action taken when the rule is matched.
# The argument to each function is a list of the values of the
# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
# on the RHS. For tokens, the value is copied from the t.value
# attribute provided by the lexer. For non-terminals, the value
# is assigned by the producing rule; i.e., the job of the grammar
# rule function is to set the value for the non-terminal on the LHS
# (by assigning to t[0]).
#####################################################################
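# For example, in the rule p_name_decl defined below,
#
#     def p_name_decl(t):
#         'name_decl : NAMESPACE ID SEMI'
#         t[0] = t[2]
#
# t[1] is the NAMESPACE keyword, t[2] is the ID token's value, t[3] is
# the SEMI token, and the action passes the ISA name up as the value of
# the 'name_decl' non-terminal.
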

# The LHS of the first grammar rule is used as the start symbol
# (in this case, 'specification'). Note that this rule enforces
# that there will be exactly one namespace declaration, with 0 or more
# global defs/decls before and after it. The defs & decls before
# the namespace decl will be outside the namespace; those after
# will be inside. The decoder function is always inside the namespace.
def p_specification(t):
    'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
    global_code = t[1]
    isa_name = t[2]
    namespace = isa_name + "Inst"
    # wrap the decode block as a function definition
    t[4].wrap_decode_block('''
StaticInstPtr
%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
{
    using namespace %(namespace)s;
''' % vars(), '}')
    # both the latter output blocks and the decode block are in the namespace
    namespace_code = t[3] + t[4]
    # pass it all back to the caller of yacc.parse()
    t[0] = (isa_name, namespace, global_code, namespace_code)

# ISA name declaration looks like "namespace <foo>;"
def p_name_decl(t):
    'name_decl : NAMESPACE ID SEMI'
    t[0] = t[2]

# 'opt_defs_and_outputs' is a possibly empty sequence of
# def and/or output statements.
def p_opt_defs_and_outputs_0(t):
    'opt_defs_and_outputs : empty'
    t[0] = GenCode()

def p_opt_defs_and_outputs_1(t):
    'opt_defs_and_outputs : defs_and_outputs'
    t[0] = t[1]

def p_defs_and_outputs_0(t):
    'defs_and_outputs : def_or_output'
    t[0] = t[1]

def p_defs_and_outputs_1(t):
    'defs_and_outputs : defs_and_outputs def_or_output'
    t[0] = t[1] + t[2]

# The list of possible definition/output statements.
def p_def_or_output(t):
    '''def_or_output : def_format
                     | def_bitfield
                     | def_bitfield_struct
                     | def_template
                     | def_operand_types
                     | def_operands
                     | output_header
                     | output_decoder
                     | output_exec
                     | global_let'''
    t[0] = t[1]

# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
# directly to the appropriate output section.


# Protect any non-dict-substitution '%'s in a format string
# (i.e. those not followed by '(')
def protect_non_subst_percents(s):
    return re.sub(r'%(?!\()', '%%', s)

# Massage output block by substituting in template definitions and bit
# operators. We handle '%'s embedded in the string that don't
# indicate template substitutions (or CPU-specific symbols, which get
# handled in GenCode) by doubling them first so that the format
# operation will reduce them back to single '%'s.
def process_output(s):
    s = protect_non_subst_percents(s)
    # protects cpu-specific symbols too
    s = protect_cpu_symbols(s)
    return substBitOps(s % templateMap)

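# A rough illustration of the '%' protection above (the strings are made
# up): protect_non_subst_percents('50% faster: %(MyTemplate)s') returns
# '50%% faster: %(MyTemplate)s', so the subsequent 's % templateMap'
# substitutes the template reference while reducing the doubled '%'
# back to a single literal percent sign.
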
def p_output_header(t):
    'output_header : OUTPUT HEADER CODELIT SEMI'
    t[0] = GenCode(header_output = process_output(t[3]))

def p_output_decoder(t):
    'output_decoder : OUTPUT DECODER CODELIT SEMI'
    t[0] = GenCode(decoder_output = process_output(t[3]))

def p_output_exec(t):
    'output_exec : OUTPUT EXEC CODELIT SEMI'
    t[0] = GenCode(exec_output = process_output(t[3]))

# global let blocks 'let {{...}}' (Python code blocks) are executed
# directly when seen. Note that these execute in a special variable
# context 'exportContext' to prevent the code from polluting this
# script's namespace.
def p_global_let(t):
    'global_let : LET CODELIT SEMI'
    updateExportContext()
    exportContext["header_output"] = ''
    exportContext["decoder_output"] = ''
    exportContext["exec_output"] = ''
    exportContext["decode_block"] = ''
    try:
        exec fixPythonIndentation(t[2]) in exportContext
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in global let block "%s".' % (exc, t[2]))
    t[0] = GenCode(header_output = exportContext["header_output"],
                   decoder_output = exportContext["decoder_output"],
                   exec_output = exportContext["exec_output"],
                   decode_block = exportContext["decode_block"])

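# Sketch of a global let block as accepted by the rule above (the body
# shown is hypothetical):
#
#     let {{
#         header_output = '// declarations shared by several formats\n'
#     }};
#
# Whatever the block assigns to header_output, decoder_output,
# exec_output, or decode_block inside exportContext is collected into
# the resulting GenCode object.
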
# Define the mapping from operand type extensions to C++ types and bit
# widths (stored in operandTypeMap).
def p_def_operand_types(t):
    'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
    try:
        userDict = eval('{' + t[3] + '}')
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operand_types block "%s".' % (exc, t[3]))
    buildOperandTypeMap(userDict, t.lineno(1))
    t[0] = GenCode() # contributes nothing to the output C++ file

# Define the mapping from operand names to operand classes and other
# traits. Stored in operandNameMap.
def p_def_operands(t):
    'def_operands : DEF OPERANDS CODELIT SEMI'
    if not globals().has_key('operandTypeMap'):
        error(t.lineno(1),
              'error: operand types must be defined before operands')
    try:
        userDict = eval('{' + t[3] + '}')
    except Exception, exc:
        error(t.lineno(1),
              'error: %s in def operands block "%s".' % (exc, t[3]))
    buildOperandNameMap(userDict, t.lineno(1))
    t[0] = GenCode() # contributes nothing to the output C++ file

# A bitfield definition looks like:
#     'def [signed] bitfield <ID> [<first>:<last>]'
# This generates a preprocessor macro in the output file.
def p_def_bitfield_0(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
    if (t[2] == 'signed'):
        expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

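# For instance (the field name is hypothetical), the declaration
#
#     def bitfield OPCODE <31:26>;
#
# generates roughly
#
#     #undef OPCODE
#     #define OPCODE  bits(machInst, 31, 26)
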
# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
def p_def_bitfield_1(t):
    'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
    expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
    if (t[2] == 'signed'):
        expr = 'sext<%d>(%s)' % (1, expr)
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

# alternate form for structure member: 'def bitfield <ID> <ID>'
def p_def_bitfield_struct(t):
    'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
    if (t[2] != ''):
        error(t.lineno(1), 'error: structure bitfields are always unsigned.')
    expr = 'machInst.%s' % t[5]
    hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
    t[0] = GenCode(header_output = hash_define)

def p_id_with_dot_0(t):
    'id_with_dot : ID'
    t[0] = t[1]

def p_id_with_dot_1(t):
    'id_with_dot : ID DOT id_with_dot'
    t[0] = t[1] + t[2] + t[3]

def p_opt_signed_0(t):
    'opt_signed : SIGNED'
    t[0] = t[1]

def p_opt_signed_1(t):
    'opt_signed : empty'
    t[0] = ''

# Global map variable to hold templates
templateMap = {}

def p_def_template(t):
    'def_template : DEF TEMPLATE ID CODELIT SEMI'
    templateMap[t[3]] = Template(t[4])
    t[0] = GenCode()

# An instruction format definition looks like
#     "def format <fmt>(<params>) {{...}};"
def p_def_format(t):
    'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
    (id, params, code) = (t[3], t[5], t[7])
    defFormat(id, params, code, t.lineno(1))
    t[0] = GenCode()

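# A format definition in an ISA description might look like this (the
# name and parameters are illustrative only):
#
#     def format BasicOp(code, *opt_flags) {{
#         # Python code that builds the instruction's C++ via templates
#     }};
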
# The formal parameter list for an instruction format is a possibly
# empty list of comma-separated parameters. Positional (standard,
# non-keyword) parameters must come first, followed by keyword
# parameters, followed by a '*foo' parameter that gets excess
# positional arguments (as in Python). Each of these three parameter
# categories is optional.
#
# Note that we do not support the '**foo' parameter for collecting
# otherwise undefined keyword args. Otherwise the parameter list is
# (I believe) identical to what is supported in Python.
#
# The param list is flattened into a single list of parameter strings
# (keyword params are rendered as strings of the form 'name = default');
# Format.__init__ joins them with commas to build its generated
# wrapper function's signature.
def p_param_list_0(t):
    'param_list : positional_param_list COMMA nonpositional_param_list'
    t[0] = t[1] + t[3]

def p_param_list_1(t):
    '''param_list : positional_param_list
                  | nonpositional_param_list'''
    t[0] = t[1]

def p_positional_param_list_0(t):
    'positional_param_list : empty'
    t[0] = []

def p_positional_param_list_1(t):
    'positional_param_list : ID'
    t[0] = [t[1]]

def p_positional_param_list_2(t):
    'positional_param_list : positional_param_list COMMA ID'
    t[0] = t[1] + [t[3]]

def p_nonpositional_param_list_0(t):
    'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
    t[0] = t[1] + t[3]

def p_nonpositional_param_list_1(t):
    '''nonpositional_param_list : keyword_param_list
                                | excess_args_param'''
    t[0] = t[1]

def p_keyword_param_list_0(t):
    'keyword_param_list : keyword_param'
    t[0] = [t[1]]

def p_keyword_param_list_1(t):
    'keyword_param_list : keyword_param_list COMMA keyword_param'
    t[0] = t[1] + [t[3]]

def p_keyword_param(t):
    'keyword_param : ID EQUALS expr'
    t[0] = t[1] + ' = ' + t[3].__repr__()

def p_excess_args_param(t):
    'excess_args_param : ASTERISK ID'
    # Just concatenate them: '*ID'. Wrap in list to be consistent
    # with positional_param_list and keyword_param_list.
    t[0] = [t[1] + t[2]]

# End of format definition-related rules.
##############

#
# A decode block looks like:
#     decode <field1> [, <field2>]* [default <inst>] { ... }
#
def p_decode_block(t):
    'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
    default_defaults = defaultStack.pop()
    codeObj = t[5]
    # use the "default defaults" only if there was no explicit
    # default statement in decode_stmt_list
    if not codeObj.has_decode_default:
        codeObj += default_defaults
    codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
    t[0] = codeObj

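# Putting the pieces together, a decode block in an ISA description
# might read as follows (format, mnemonic, and operand names here are
# hypothetical):
#
#     decode OPCODE default Unknown::unknown() {
#         format BasicOp {
#             0x1: add({{ Rc = Ra + Rb; }});
#         }
#     }
#
# The rules above turn this into a C++ 'switch (OPCODE)' statement with
# one case per label and the default case wired to the Unknown format's
# instruction.
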
# The opt_default statement serves only to push the "default defaults"
# onto defaultStack. This value will be used by nested decode blocks,
# and used and popped off when the current decode_block is processed
# (in p_decode_block() above).
def p_opt_default_0(t):
    'opt_default : empty'
    # no default specified: reuse the one currently at the top of the stack
    defaultStack.push(defaultStack.top())
    # no meaningful value returned
    t[0] = None

def p_opt_default_1(t):
    'opt_default : DEFAULT inst'
    # push the new default
    codeObj = t[2]
    codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
    defaultStack.push(codeObj)
    # no meaningful value returned
    t[0] = None

def p_decode_stmt_list_0(t):
    'decode_stmt_list : decode_stmt'
    t[0] = t[1]

def p_decode_stmt_list_1(t):
    'decode_stmt_list : decode_stmt decode_stmt_list'
    if (t[1].has_decode_default and t[2].has_decode_default):
        error(t.lineno(1), 'Two default cases in decode block')
    t[0] = t[1] + t[2]

#
# Decode statement rules
#
# There are four types of statements allowed in a decode block:
# 1. Format blocks 'format <foo> { ... }'
# 2. Nested decode blocks
# 3. Instruction definitions.
# 4. C preprocessor directives.


# Preprocessor directives found in a decode statement list are passed
# through to the output, replicated to all of the output code
# streams. This works well for ifdefs, so we can ifdef out both the
# declarations and the decode cases generated by an instruction
# definition. Handling them as part of the grammar makes it easy to
# keep them in the right place with respect to the code generated by
# the other statements.
def p_decode_stmt_cpp(t):
    'decode_stmt : CPPDIRECTIVE'
    t[0] = GenCode(t[1], t[1], t[1], t[1])

# A format block 'format <foo> { ... }' sets the default instruction
# format used to handle instruction definitions inside the block.
# This format can be overridden by using an explicit format on the
# instruction definition or with a nested format block.
def p_decode_stmt_format(t):
    'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
    # The format will be pushed on the stack when 'push_format_id' is
    # processed (see below). Once the parser has recognized the full
    # production (through the right brace), we're done with the format,
    # so now we can pop it.
    formatStack.pop()
    t[0] = t[4]

# This rule exists so we can set the current format (& push the stack)
# when we recognize the format name part of the format block.
def p_push_format_id(t):
    'push_format_id : ID'
    try:
        formatStack.push(formatMap[t[1]])
        t[0] = ('', '// format %s' % t[1])
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])

# Nested decode block: if the value of the current field matches the
# specified constant, do a nested decode on some other field.
def p_decode_stmt_decode(t):
    'decode_stmt : case_label COLON decode_block'
    label = t[1]
    codeObj = t[3]
    # just wrap the decoding code from the block as a case in the
    # outer switch statement.
    codeObj.wrap_decode_block('\n%s:\n' % label)
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj

# Instruction definition (finally!).
def p_decode_stmt_inst(t):
    'decode_stmt : case_label COLON inst SEMI'
    label = t[1]
    codeObj = t[3]
    codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
    codeObj.has_decode_default = (label == 'default')
    t[0] = codeObj

# The case label is either a list of one or more constants or 'default'
def p_case_label_0(t):
    'case_label : intlit_list'
    t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))

def p_case_label_1(t):
    'case_label : DEFAULT'
    t[0] = 'default'

#
# The constant list for a decode case label must contain at least one
# integer literal; multiple literals are separated by commas.
#
def p_intlit_list_0(t):
    'intlit_list : INTLIT'
    t[0] = [t[1]]

def p_intlit_list_1(t):
    'intlit_list : intlit_list COMMA INTLIT'
    t[0] = t[1]
    t[0].append(t[3])

# Define an instruction using the current instruction format (specified
# by an enclosing format block).
# "<mnemonic>(<args>)"
def p_inst_0(t):
    'inst : ID LPAREN arg_list RPAREN'
    # Pass the ID and arg list to the current format class to deal with.
    currentFormat = formatStack.top()
    codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
    args = ','.join(map(str, t[3]))
    args = re.sub('(?m)^', '//', args)
    args = re.sub('^//', '', args)
    comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
    codeObj.prepend_all(comment)
    t[0] = codeObj

# Define an instruction using an explicitly specified format:
# "<fmt>::<mnemonic>(<args>)"
def p_inst_1(t):
    'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
    try:
        format = formatMap[t[1]]
    except KeyError:
        error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
    codeObj = format.defineInst(t[3], t[5], t.lineno(1))
    comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
    codeObj.prepend_all(comment)
    t[0] = codeObj

# The arg list generates a tuple, where the first element is a list of
# the positional args and the second element is a dict containing the
# keyword args.
def p_arg_list_0(t):
    'arg_list : positional_arg_list COMMA keyword_arg_list'
    t[0] = ( t[1], t[3] )

def p_arg_list_1(t):
    'arg_list : positional_arg_list'
    t[0] = ( t[1], {} )

def p_arg_list_2(t):
    'arg_list : keyword_arg_list'
    t[0] = ( [], t[1] )

def p_positional_arg_list_0(t):
    'positional_arg_list : empty'
    t[0] = []

def p_positional_arg_list_1(t):
    'positional_arg_list : expr'
    t[0] = [t[1]]

def p_positional_arg_list_2(t):
    'positional_arg_list : positional_arg_list COMMA expr'
    t[0] = t[1] + [t[3]]

def p_keyword_arg_list_0(t):
    'keyword_arg_list : keyword_arg'
    t[0] = t[1]

def p_keyword_arg_list_1(t):
    'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
    t[0] = t[1]
    t[0].update(t[3])

def p_keyword_arg(t):
    'keyword_arg : ID EQUALS expr'
    t[0] = { t[1] : t[3] }

#
# Basic expressions. These constitute the argument values of
# "function calls" (i.e. instruction definitions in the decode block)
# and default values for formal parameters of format functions.
#
# Right now, these are either strings, integers, or (recursively)
# lists of exprs (using Python square-bracket list syntax). Note that
# bare identifiers are treated as string constants here (since there
# isn't really a variable namespace to refer to).
#
def p_expr_0(t):
    '''expr : ID
            | INTLIT
            | STRLIT
            | CODELIT'''
    t[0] = t[1]

def p_expr_1(t):
    '''expr : LBRACKET list_expr RBRACKET'''
    t[0] = t[2]

def p_list_expr_0(t):
    'list_expr : expr'
    t[0] = [t[1]]

def p_list_expr_1(t):
    'list_expr : list_expr COMMA expr'
    t[0] = t[1] + [t[3]]

def p_list_expr_2(t):
    'list_expr : empty'
    t[0] = []

#
# Empty production... use in other rules for readability.
#
def p_empty(t):
    'empty :'
    pass

# Parse error handler. Note that the argument here is the offending
# *token*, not a grammar symbol (hence the need to use t.value)
def p_error(t):
    if t:
        error(t.lineno, "syntax error at '%s'" % t.value)
    else:
        error(0, "unknown syntax error", True)

# END OF GRAMMAR RULES
#
# Now build the parser.
yacc.yacc()


#####################################################################
#
# Support Classes
#
#####################################################################

# Expand template with CPU-specific references into a dictionary with
# an entry for each CPU model name. The entry key is the model name
# and the corresponding value is the template with the CPU-specific
# refs substituted for that model.
def expand_cpu_symbols_to_dict(template):
    # Protect '%'s that don't go with CPU-specific terms
    t = re.sub(r'%(?!\(CPU_)', '%%', template)
    result = {}
    for cpu in cpu_models:
        result[cpu.name] = t % cpu.strings
    return result

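# Rough example ('CPU_foo' is the placeholder spelling also used in the
# GenCode comments below; the template text is made up): calling
#
#     expand_cpu_symbols_to_dict('ctx = %(CPU_foo)s;')
#
# yields one entry per model in cpu_models, each with %(CPU_foo)s
# replaced from that model's 'strings' dictionary and every other '%'
# in the template left unchanged.
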
# *If* the template has CPU-specific references, return a single
# string containing a copy of the template for each CPU model with the
# corresponding values substituted in. If the template has no
# CPU-specific references, it is returned unmodified.
def expand_cpu_symbols_to_string(template):
    if template.find('%(CPU_') != -1:
        return reduce(lambda x,y: x+y,
                      expand_cpu_symbols_to_dict(template).values())
    else:
        return template

# Protect CPU-specific references by doubling the corresponding '%'s
# (in preparation for substituting a different set of references into
# the template).
def protect_cpu_symbols(template):
    return re.sub(r'%(?=\(CPU_)', '%%', template)

###############
# GenCode class
#
# The GenCode class encapsulates generated code destined for various
# output files. The header_output and decoder_output attributes are
# strings containing code destined for decoder.hh and decoder.cc
# respectively. The decode_block attribute contains code to be
# incorporated in the decode function itself (that will also end up in
# decoder.cc). The exec_output attribute is a dictionary with a key
# for each CPU model name; the value associated with a particular key
# is the string of code for that CPU model's exec.cc file. The
# has_decode_default attribute is used in the decode block to allow
# explicit default clauses to override default default clauses.

class GenCode:
    # Constructor. At this point we substitute out all CPU-specific
    # symbols. For the exec output, these go into the per-model
    # dictionary. For all other output types they get collapsed into
    # a single string.
    def __init__(self,
                 header_output = '', decoder_output = '', exec_output = '',
                 decode_block = '', has_decode_default = False):
        self.header_output = expand_cpu_symbols_to_string(header_output)
        self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
        if isinstance(exec_output, dict):
            self.exec_output = exec_output
        elif isinstance(exec_output, str):
            # If the exec_output arg is a single string, we replicate
            # it for each of the CPU models, substituting any
            # %(CPU_foo)s params appropriately.
            self.exec_output = expand_cpu_symbols_to_dict(exec_output)
        self.decode_block = expand_cpu_symbols_to_string(decode_block)
        self.has_decode_default = has_decode_default

    # Override '+' operator: generate a new GenCode object that
    # concatenates all the individual strings in the operands.
    def __add__(self, other):
        exec_output = {}
        for cpu in cpu_models:
            n = cpu.name
            exec_output[n] = self.exec_output[n] + other.exec_output[n]
        return GenCode(self.header_output + other.header_output,
                       self.decoder_output + other.decoder_output,
                       exec_output,
                       self.decode_block + other.decode_block,
                       self.has_decode_default or other.has_decode_default)

    # Prepend a string (typically a comment) to all the strings.
    def prepend_all(self, pre):
        self.header_output = pre + self.header_output
        self.decoder_output = pre + self.decoder_output
        self.decode_block = pre + self.decode_block
        for cpu in cpu_models:
            self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]

    # Wrap the decode block in a pair of strings (e.g., 'case foo:'
    # and 'break;'). Used to build the big nested switch statement.
    def wrap_decode_block(self, pre, post = ''):
        self.decode_block = pre + indent(self.decode_block) + post

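# Minimal sketch of how the decode rules use GenCode (the C++ fragment
# is invented):
#
#     g = GenCode(decode_block = 'return new Nop(machInst);\n')
#     g.wrap_decode_block('\ncase 0x0:\n', 'break;\n')
#
# Afterwards g.decode_block holds the case label, the indented original
# block, and the trailing 'break;', ready to be spliced into the outer
# switch statement by an enclosing decode_block rule.
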
################
# Format object.
#
# A format object encapsulates an instruction format. It must provide
# a defineInst() method that generates the code for an instruction
# definition.

exportContextSymbols = ('InstObjParams', 'makeList', 're', 'string')

exportContext = {}

def updateExportContext():
    exportContext.update(exportDict(*exportContextSymbols))
    exportContext.update(templateMap)

def exportDict(*symNames):
    return dict([(s, eval(s)) for s in symNames])


class Format:
    def __init__(self, id, params, code):
        # constructor: just save away arguments
        self.id = id
        self.params = params
        label = 'def format ' + id
        self.user_code = compile(fixPythonIndentation(code), label, 'exec')
        param_list = string.join(params, ", ")
        f = '''def defInst(_code, _context, %s):
    my_locals = vars().copy()
    exec _code in _context, my_locals
    return my_locals\n''' % param_list
        c = compile(f, label + ' wrapper', 'exec')
        exec c
        self.func = defInst

    def defineInst(self, name, args, lineno):
        context = {}
        updateExportContext()
        context.update(exportContext)
1# Copyright (c) 2003-2005 The Regents of The University of Michigan
2# All rights reserved.
3#
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met: redistributions of source code must retain the above copyright
7# notice, this list of conditions and the following disclaimer;
8# redistributions in binary form must reproduce the above copyright
9# notice, this list of conditions and the following disclaimer in the
10# documentation and/or other materials provided with the distribution;
11# neither the name of the copyright holders nor the names of its
12# contributors may be used to endorse or promote products derived from
13# this software without specific prior written permission.
14#
15# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26#
27# Authors: Steve Reinhardt
28# Korey Sewell
29
30import os
31import sys
32import re
33import string
34import traceback
35# get type names
36from types import *
37
38# Prepend the directory where the PLY lex & yacc modules are found
39# to the search path. Assumes we're compiling in a subdirectory
40# of 'build' in the current tree.
41sys.path[0:0] = [os.environ['M5_PLY']]
42
43import lex
44import yacc
45
46#####################################################################
47#
48# Lexer
49#
50# The PLY lexer module takes two things as input:
51# - A list of token names (the string list 'tokens')
52# - A regular expression describing a match for each token. The
53# regexp for token FOO can be provided in two ways:
54# - as a string variable named t_FOO
55# - as the doc string for a function named t_FOO. In this case,
56# the function is also executed, allowing an action to be
57# associated with each token match.
58#
59#####################################################################
60
61# Reserved words. These are listed separately as they are matched
62# using the same regexp as generic IDs, but distinguished in the
63# t_ID() function. The PLY documentation suggests this approach.
64reserved = (
65 'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
66 'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
67 'OUTPUT', 'SIGNED', 'TEMPLATE'
68 )
69
70# List of tokens. The lex module requires this.
71tokens = reserved + (
72 # identifier
73 'ID',
74
75 # integer literal
76 'INTLIT',
77
78 # string literal
79 'STRLIT',
80
81 # code literal
82 'CODELIT',
83
84 # ( ) [ ] { } < > , ; . : :: *
85 'LPAREN', 'RPAREN',
86 'LBRACKET', 'RBRACKET',
87 'LBRACE', 'RBRACE',
88 'LESS', 'GREATER', 'EQUALS',
89 'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
90 'ASTERISK',
91
92 # C preprocessor directives
93 'CPPDIRECTIVE'
94
95# The following are matched but never returned. commented out to
96# suppress PLY warning
97 # newfile directive
98# 'NEWFILE',
99
100 # endfile directive
101# 'ENDFILE'
102)
103
104# Regular expressions for token matching
105t_LPAREN = r'\('
106t_RPAREN = r'\)'
107t_LBRACKET = r'\['
108t_RBRACKET = r'\]'
109t_LBRACE = r'\{'
110t_RBRACE = r'\}'
111t_LESS = r'\<'
112t_GREATER = r'\>'
113t_EQUALS = r'='
114t_COMMA = r','
115t_SEMI = r';'
116t_DOT = r'\.'
117t_COLON = r':'
118t_DBLCOLON = r'::'
119t_ASTERISK = r'\*'
120
121# Identifiers and reserved words
122reserved_map = { }
123for r in reserved:
124 reserved_map[r.lower()] = r
125
126def t_ID(t):
127 r'[A-Za-z_]\w*'
128 t.type = reserved_map.get(t.value,'ID')
129 return t
130
131# Integer literal
132def t_INTLIT(t):
133 r'(0x[\da-fA-F]+)|\d+'
134 try:
135 t.value = int(t.value,0)
136 except ValueError:
137 error(t.lineno, 'Integer value "%s" too large' % t.value)
138 t.value = 0
139 return t
140
141# String literal. Note that these use only single quotes, and
142# can span multiple lines.
143def t_STRLIT(t):
144 r"(?m)'([^'])+'"
145 # strip off quotes
146 t.value = t.value[1:-1]
147 t.lineno += t.value.count('\n')
148 return t
149
150
151# "Code literal"... like a string literal, but delimiters are
152# '{{' and '}}' so they get formatted nicely under emacs c-mode
153def t_CODELIT(t):
154 r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
155 # strip off {{ & }}
156 t.value = t.value[2:-2]
157 t.lineno += t.value.count('\n')
158 return t
159
160def t_CPPDIRECTIVE(t):
161 r'^\#[^\#].*\n'
162 t.lineno += t.value.count('\n')
163 return t
164
165def t_NEWFILE(t):
166 r'^\#\#newfile\s+"[\w/.-]*"'
167 fileNameStack.push((t.value[11:-1], t.lineno))
168 t.lineno = 0
169
170def t_ENDFILE(t):
171 r'^\#\#endfile'
172 (old_filename, t.lineno) = fileNameStack.pop()
173
174#
175# The functions t_NEWLINE, t_ignore, and t_error are
176# special for the lex module.
177#
178
179# Newlines
180def t_NEWLINE(t):
181 r'\n+'
182 t.lineno += t.value.count('\n')
183
184# Comments
185def t_comment(t):
186 r'//.*'
187
188# Completely ignored characters
189t_ignore = ' \t\x0c'
190
191# Error handler
192def t_error(t):
193 error(t.lineno, "illegal character '%s'" % t.value[0])
194 t.skip(1)
195
196# Build the lexer
197lex.lex()
198
199#####################################################################
200#
201# Parser
202#
203# Every function whose name starts with 'p_' defines a grammar rule.
204# The rule is encoded in the function's doc string, while the
205# function body provides the action taken when the rule is matched.
206# The argument to each function is a list of the values of the
207# rule's symbols: t[0] for the LHS, and t[1..n] for the symbols
208# on the RHS. For tokens, the value is copied from the t.value
209# attribute provided by the lexer. For non-terminals, the value
210# is assigned by the producing rule; i.e., the job of the grammar
211# rule function is to set the value for the non-terminal on the LHS
212# (by assigning to t[0]).
213#####################################################################
214
215# The LHS of the first grammar rule is used as the start symbol
216# (in this case, 'specification'). Note that this rule enforces
217# that there will be exactly one namespace declaration, with 0 or more
218# global defs/decls before and after it. The defs & decls before
219# the namespace decl will be outside the namespace; those after
220# will be inside. The decoder function is always inside the namespace.
221def p_specification(t):
222 'specification : opt_defs_and_outputs name_decl opt_defs_and_outputs decode_block'
223 global_code = t[1]
224 isa_name = t[2]
225 namespace = isa_name + "Inst"
226 # wrap the decode block as a function definition
227 t[4].wrap_decode_block('''
228StaticInstPtr
229%(isa_name)s::decodeInst(%(isa_name)s::ExtMachInst machInst)
230{
231 using namespace %(namespace)s;
232''' % vars(), '}')
233 # both the latter output blocks and the decode block are in the namespace
234 namespace_code = t[3] + t[4]
235 # pass it all back to the caller of yacc.parse()
236 t[0] = (isa_name, namespace, global_code, namespace_code)
237
238# ISA name declaration looks like "namespace <foo>;"
239def p_name_decl(t):
240 'name_decl : NAMESPACE ID SEMI'
241 t[0] = t[2]
242
243# 'opt_defs_and_outputs' is a possibly empty sequence of
244# def and/or output statements.
245def p_opt_defs_and_outputs_0(t):
246 'opt_defs_and_outputs : empty'
247 t[0] = GenCode()
248
249def p_opt_defs_and_outputs_1(t):
250 'opt_defs_and_outputs : defs_and_outputs'
251 t[0] = t[1]
252
253def p_defs_and_outputs_0(t):
254 'defs_and_outputs : def_or_output'
255 t[0] = t[1]
256
257def p_defs_and_outputs_1(t):
258 'defs_and_outputs : defs_and_outputs def_or_output'
259 t[0] = t[1] + t[2]
260
261# The list of possible definition/output statements.
262def p_def_or_output(t):
263 '''def_or_output : def_format
264 | def_bitfield
265 | def_bitfield_struct
266 | def_template
267 | def_operand_types
268 | def_operands
269 | output_header
270 | output_decoder
271 | output_exec
272 | global_let'''
273 t[0] = t[1]
274
275# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
276# directly to the appropriate output section.
277
278
279# Protect any non-dict-substitution '%'s in a format string
280# (i.e. those not followed by '(')
281def protect_non_subst_percents(s):
282 return re.sub(r'%(?!\()', '%%', s)
283
284# Massage output block by substituting in template definitions and bit
285# operators. We handle '%'s embedded in the string that don't
286# indicate template substitutions (or CPU-specific symbols, which get
287# handled in GenCode) by doubling them first so that the format
288# operation will reduce them back to single '%'s.
289def process_output(s):
290 s = protect_non_subst_percents(s)
291 # protects cpu-specific symbols too
292 s = protect_cpu_symbols(s)
293 return substBitOps(s % templateMap)
294
295def p_output_header(t):
296 'output_header : OUTPUT HEADER CODELIT SEMI'
297 t[0] = GenCode(header_output = process_output(t[3]))
298
299def p_output_decoder(t):
300 'output_decoder : OUTPUT DECODER CODELIT SEMI'
301 t[0] = GenCode(decoder_output = process_output(t[3]))
302
303def p_output_exec(t):
304 'output_exec : OUTPUT EXEC CODELIT SEMI'
305 t[0] = GenCode(exec_output = process_output(t[3]))
306
307# global let blocks 'let {{...}}' (Python code blocks) are executed
308# directly when seen. Note that these execute in a special variable
309# context 'exportContext' to prevent the code from polluting this
310# script's namespace.
311def p_global_let(t):
312 'global_let : LET CODELIT SEMI'
313 updateExportContext()
314 exportContext["header_output"] = ''
315 exportContext["decoder_output"] = ''
316 exportContext["exec_output"] = ''
317 exportContext["decode_block"] = ''
318 try:
319 exec fixPythonIndentation(t[2]) in exportContext
320 except Exception, exc:
321 error(t.lineno(1),
322 'error: %s in global let block "%s".' % (exc, t[2]))
323 t[0] = GenCode(header_output = exportContext["header_output"],
324 decoder_output = exportContext["decoder_output"],
325 exec_output = exportContext["exec_output"],
326 decode_block = exportContext["decode_block"])
327
328# Define the mapping from operand type extensions to C++ types and bit
329# widths (stored in operandTypeMap).
330def p_def_operand_types(t):
331 'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
332 try:
333 userDict = eval('{' + t[3] + '}')
334 except Exception, exc:
335 error(t.lineno(1),
336 'error: %s in def operand_types block "%s".' % (exc, t[3]))
337 buildOperandTypeMap(userDict, t.lineno(1))
338 t[0] = GenCode() # contributes nothing to the output C++ file
339
340# Define the mapping from operand names to operand classes and other
341# traits. Stored in operandNameMap.
342def p_def_operands(t):
343 'def_operands : DEF OPERANDS CODELIT SEMI'
344 if not globals().has_key('operandTypeMap'):
345 error(t.lineno(1),
346 'error: operand types must be defined before operands')
347 try:
348 userDict = eval('{' + t[3] + '}')
349 except Exception, exc:
350 error(t.lineno(1),
351 'error: %s in def operands block "%s".' % (exc, t[3]))
352 buildOperandNameMap(userDict, t.lineno(1))
353 t[0] = GenCode() # contributes nothing to the output C++ file
354
355# A bitfield definition looks like:
356# 'def [signed] bitfield <ID> [<first>:<last>]'
357# This generates a preprocessor macro in the output file.
358def p_def_bitfield_0(t):
359 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
360 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
361 if (t[2] == 'signed'):
362 expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
363 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
364 t[0] = GenCode(header_output = hash_define)
365
366# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
367def p_def_bitfield_1(t):
368 'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
369 expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
370 if (t[2] == 'signed'):
371 expr = 'sext<%d>(%s)' % (1, expr)
372 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
373 t[0] = GenCode(header_output = hash_define)
374
375# alternate form for structure member: 'def bitfield <ID> <ID>'
376def p_def_bitfield_struct(t):
377 'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
378 if (t[2] != ''):
379 error(t.lineno(1), 'error: structure bitfields are always unsigned.')
380 expr = 'machInst.%s' % t[5]
381 hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
382 t[0] = GenCode(header_output = hash_define)
383
384def p_id_with_dot_0(t):
385 'id_with_dot : ID'
386 t[0] = t[1]
387
388def p_id_with_dot_1(t):
389 'id_with_dot : ID DOT id_with_dot'
390 t[0] = t[1] + t[2] + t[3]
391
392def p_opt_signed_0(t):
393 'opt_signed : SIGNED'
394 t[0] = t[1]
395
396def p_opt_signed_1(t):
397 'opt_signed : empty'
398 t[0] = ''
399
400# Global map variable to hold templates
401templateMap = {}
402
403def p_def_template(t):
404 'def_template : DEF TEMPLATE ID CODELIT SEMI'
405 templateMap[t[3]] = Template(t[4])
406 t[0] = GenCode()
407
408# An instruction format definition looks like
409# "def format <fmt>(<params>) {{...}};"
410def p_def_format(t):
411 'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
412 (id, params, code) = (t[3], t[5], t[7])
413 defFormat(id, params, code, t.lineno(1))
414 t[0] = GenCode()
415
416# The formal parameter list for an instruction format is a possibly
417# empty list of comma-separated parameters. Positional (standard,
418# non-keyword) parameters must come first, followed by keyword
419# parameters, followed by a '*foo' parameter that gets excess
420# positional arguments (as in Python). Each of these three parameter
421# categories is optional.
422#
423# Note that we do not support the '**foo' parameter for collecting
424# otherwise undefined keyword args. Otherwise the parameter list is
425# (I believe) identical to what is supported in Python.
426#
427# The param list generates a tuple, where the first element is a list of
428# the positional params and the second element is a dict containing the
429# keyword params.
430def p_param_list_0(t):
431 'param_list : positional_param_list COMMA nonpositional_param_list'
432 t[0] = t[1] + t[3]
433
434def p_param_list_1(t):
435 '''param_list : positional_param_list
436 | nonpositional_param_list'''
437 t[0] = t[1]
438
439def p_positional_param_list_0(t):
440 'positional_param_list : empty'
441 t[0] = []
442
443def p_positional_param_list_1(t):
444 'positional_param_list : ID'
445 t[0] = [t[1]]
446
447def p_positional_param_list_2(t):
448 'positional_param_list : positional_param_list COMMA ID'
449 t[0] = t[1] + [t[3]]
450
451def p_nonpositional_param_list_0(t):
452 'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
453 t[0] = t[1] + t[3]
454
455def p_nonpositional_param_list_1(t):
456 '''nonpositional_param_list : keyword_param_list
457 | excess_args_param'''
458 t[0] = t[1]
459
460def p_keyword_param_list_0(t):
461 'keyword_param_list : keyword_param'
462 t[0] = [t[1]]
463
464def p_keyword_param_list_1(t):
465 'keyword_param_list : keyword_param_list COMMA keyword_param'
466 t[0] = t[1] + [t[3]]
467
468def p_keyword_param(t):
469 'keyword_param : ID EQUALS expr'
470 t[0] = t[1] + ' = ' + t[3].__repr__()
471
472def p_excess_args_param(t):
473 'excess_args_param : ASTERISK ID'
474 # Just concatenate them: '*ID'. Wrap in list to be consistent
475 # with positional_param_list and keyword_param_list.
476 t[0] = [t[1] + t[2]]
477
478# End of format definition-related rules.
479##############
480
481#
482# A decode block looks like:
483# decode <field1> [, <field2>]* [default <inst>] { ... }
484#
485def p_decode_block(t):
486 'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
487 default_defaults = defaultStack.pop()
488 codeObj = t[5]
489 # use the "default defaults" only if there was no explicit
490 # default statement in decode_stmt_list
491 if not codeObj.has_decode_default:
492 codeObj += default_defaults
493 codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
494 t[0] = codeObj
495
496# The opt_default statement serves only to push the "default defaults"
497# onto defaultStack. This value will be used by nested decode blocks,
498# and used and popped off when the current decode_block is processed
499# (in p_decode_block() above).
500def p_opt_default_0(t):
501 'opt_default : empty'
502 # no default specified: reuse the one currently at the top of the stack
503 defaultStack.push(defaultStack.top())
504 # no meaningful value returned
505 t[0] = None
506
507def p_opt_default_1(t):
508 'opt_default : DEFAULT inst'
509 # push the new default
510 codeObj = t[2]
511 codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
512 defaultStack.push(codeObj)
513 # no meaningful value returned
514 t[0] = None
515
516def p_decode_stmt_list_0(t):
517 'decode_stmt_list : decode_stmt'
518 t[0] = t[1]
519
520def p_decode_stmt_list_1(t):
521 'decode_stmt_list : decode_stmt decode_stmt_list'
522 if (t[1].has_decode_default and t[2].has_decode_default):
523 error(t.lineno(1), 'Two default cases in decode block')
524 t[0] = t[1] + t[2]
525
526#
527# Decode statement rules
528#
529# There are four types of statements allowed in a decode block:
530# 1. Format blocks 'format <foo> { ... }'
531# 2. Nested decode blocks
532# 3. Instruction definitions.
533# 4. C preprocessor directives.
534
535
536# Preprocessor directives found in a decode statement list are passed
537# through to the output, replicated to all of the output code
538# streams. This works well for ifdefs, so we can ifdef out both the
539# declarations and the decode cases generated by an instruction
540# definition. Handling them as part of the grammar makes it easy to
541# keep them in the right place with respect to the code generated by
542# the other statements.
543def p_decode_stmt_cpp(t):
544 'decode_stmt : CPPDIRECTIVE'
545 t[0] = GenCode(t[1], t[1], t[1], t[1])
546
547# A format block 'format <foo> { ... }' sets the default instruction
548# format used to handle instruction definitions inside the block.
549# This format can be overridden by using an explicit format on the
550# instruction definition or with a nested format block.
551def p_decode_stmt_format(t):
552 'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
553 # The format will be pushed on the stack when 'push_format_id' is
554 # processed (see below). Once the parser has recognized the full
555 # production (though the right brace), we're done with the format,
556 # so now we can pop it.
557 formatStack.pop()
558 t[0] = t[4]
559
560# This rule exists so we can set the current format (& push the stack)
561# when we recognize the format name part of the format block.
562def p_push_format_id(t):
563 'push_format_id : ID'
564 try:
565 formatStack.push(formatMap[t[1]])
566 t[0] = ('', '// format %s' % t[1])
567 except KeyError:
568 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
569
570# Nested decode block: if the value of the current field matches the
571# specified constant, do a nested decode on some other field.
572def p_decode_stmt_decode(t):
573 'decode_stmt : case_label COLON decode_block'
574 label = t[1]
575 codeObj = t[3]
576 # just wrap the decoding code from the block as a case in the
577 # outer switch statement.
578 codeObj.wrap_decode_block('\n%s:\n' % label)
579 codeObj.has_decode_default = (label == 'default')
580 t[0] = codeObj
581
582# Instruction definition (finally!).
583def p_decode_stmt_inst(t):
584 'decode_stmt : case_label COLON inst SEMI'
585 label = t[1]
586 codeObj = t[3]
587 codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
588 codeObj.has_decode_default = (label == 'default')
589 t[0] = codeObj
590
591# The case label is either a list of one or more constants or 'default'
592def p_case_label_0(t):
593 'case_label : intlit_list'
594 t[0] = ': '.join(map(lambda a: 'case %#x' % a, t[1]))
595
596def p_case_label_1(t):
597 'case_label : DEFAULT'
598 t[0] = 'default'
599
600#
601# The constant list for a decode case label must be non-empty, but may have
602# one or more comma-separated integer literals in it.
603#
604def p_intlit_list_0(t):
605 'intlit_list : INTLIT'
606 t[0] = [t[1]]
607
608def p_intlit_list_1(t):
609 'intlit_list : intlit_list COMMA INTLIT'
610 t[0] = t[1]
611 t[0].append(t[3])
612
613# Define an instruction using the current instruction format (specified
614# by an enclosing format block).
615# "<mnemonic>(<args>)"
616def p_inst_0(t):
617 'inst : ID LPAREN arg_list RPAREN'
618 # Pass the ID and arg list to the current format class to deal with.
619 currentFormat = formatStack.top()
620 codeObj = currentFormat.defineInst(t[1], t[3], t.lineno(1))
621 args = ','.join(map(str, t[3]))
622 args = re.sub('(?m)^', '//', args)
623 args = re.sub('^//', '', args)
624 comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
625 codeObj.prepend_all(comment)
626 t[0] = codeObj
627
628# Define an instruction using an explicitly specified format:
629# "<fmt>::<mnemonic>(<args>)"
630def p_inst_1(t):
631 'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
632 try:
633 format = formatMap[t[1]]
634 except KeyError:
635 error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
636 codeObj = format.defineInst(t[3], t[5], t.lineno(1))
637 comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
638 codeObj.prepend_all(comment)
639 t[0] = codeObj
640
641# The arg list generates a tuple, where the first element is a list of
642# the positional args and the second element is a dict containing the
643# keyword args.
644def p_arg_list_0(t):
645 'arg_list : positional_arg_list COMMA keyword_arg_list'
646 t[0] = ( t[1], t[3] )
647
648def p_arg_list_1(t):
649 'arg_list : positional_arg_list'
650 t[0] = ( t[1], {} )
651
652def p_arg_list_2(t):
653 'arg_list : keyword_arg_list'
654 t[0] = ( [], t[1] )
655
656def p_positional_arg_list_0(t):
657 'positional_arg_list : empty'
658 t[0] = []
659
660def p_positional_arg_list_1(t):
661 'positional_arg_list : expr'
662 t[0] = [t[1]]
663
664def p_positional_arg_list_2(t):
665 'positional_arg_list : positional_arg_list COMMA expr'
666 t[0] = t[1] + [t[3]]
667
668def p_keyword_arg_list_0(t):
669 'keyword_arg_list : keyword_arg'
670 t[0] = t[1]
671
672def p_keyword_arg_list_1(t):
673 'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
674 t[0] = t[1]
675 t[0].update(t[3])
676
677def p_keyword_arg(t):
678 'keyword_arg : ID EQUALS expr'
679 t[0] = { t[1] : t[3] }
680
681#
682# Basic expressions. These constitute the argument values of
683# "function calls" (i.e. instruction definitions in the decode block)
684# and default values for formal parameters of format functions.
685#
686# Right now, these are either strings, integers, or (recursively)
687# lists of exprs (using Python square-bracket list syntax). Note that
688# bare identifiers are trated as string constants here (since there
689# isn't really a variable namespace to refer to).
690#
691def p_expr_0(t):
692 '''expr : ID
693 | INTLIT
694 | STRLIT
695 | CODELIT'''
696 t[0] = t[1]
697
698def p_expr_1(t):
699 '''expr : LBRACKET list_expr RBRACKET'''
700 t[0] = t[2]
701
702def p_list_expr_0(t):
703 'list_expr : expr'
704 t[0] = [t[1]]
705
706def p_list_expr_1(t):
707 'list_expr : list_expr COMMA expr'
708 t[0] = t[1] + [t[3]]
709
710def p_list_expr_2(t):
711 'list_expr : empty'
712 t[0] = []
713
714#
715# Empty production... use in other rules for readability.
716#
717def p_empty(t):
718 'empty :'
719 pass
720
721# Parse error handler. Note that the argument here is the offending
722# *token*, not a grammar symbol (hence the need to use t.value)
723def p_error(t):
724 if t:
725 error(t.lineno, "syntax error at '%s'" % t.value)
726 else:
727 error(0, "unknown syntax error", True)
728
729# END OF GRAMMAR RULES
730#
731# Now build the parser.
732yacc.yacc()
733
734
735#####################################################################
736#
737# Support Classes
738#
739#####################################################################
740
741# Expand template with CPU-specific references into a dictionary with
742# an entry for each CPU model name. The entry key is the model name
743# and the corresponding value is the template with the CPU-specific
744# refs substituted for that model.
745def expand_cpu_symbols_to_dict(template):
746 # Protect '%'s that don't go with CPU-specific terms
747 t = re.sub(r'%(?!\(CPU_)', '%%', template)
748 result = {}
749 for cpu in cpu_models:
750 result[cpu.name] = t % cpu.strings
751 return result
752
753# *If* the template has CPU-specific references, return a single
754# string containing a copy of the template for each CPU model with the
755# corresponding values substituted in. If the template has no
756# CPU-specific references, it is returned unmodified.
757def expand_cpu_symbols_to_string(template):
758 if template.find('%(CPU_') != -1:
759 return reduce(lambda x,y: x+y,
760 expand_cpu_symbols_to_dict(template).values())
761 else:
762 return template
763
764# Protect CPU-specific references by doubling the corresponding '%'s
765# (in preparation for substituting a different set of references into
766# the template).
767def protect_cpu_symbols(template):
768 return re.sub(r'%(?=\(CPU_)', '%%', template)
769
770###############
771# GenCode class
772#
773# The GenCode class encapsulates generated code destined for various
774# output files. The header_output and decoder_output attributes are
775# strings containing code destined for decoder.hh and decoder.cc
776# respectively. The decode_block attribute contains code to be
777# incorporated in the decode function itself (that will also end up in
778# decoder.cc). The exec_output attribute is a dictionary with a key
779# for each CPU model name; the value associated with a particular key
780# is the string of code for that CPU model's exec.cc file. The
781# has_decode_default attribute is used in the decode block to allow
782# explicit default clauses to override default default clauses.
783
784class GenCode:
785 # Constructor. At this point we substitute out all CPU-specific
786 # symbols. For the exec output, these go into the per-model
787 # dictionary. For all other output types they get collapsed into
788 # a single string.
789 def __init__(self,
790 header_output = '', decoder_output = '', exec_output = '',
791 decode_block = '', has_decode_default = False):
792 self.header_output = expand_cpu_symbols_to_string(header_output)
793 self.decoder_output = expand_cpu_symbols_to_string(decoder_output)
794 if isinstance(exec_output, dict):
795 self.exec_output = exec_output
796 elif isinstance(exec_output, str):
797 # If the exec_output arg is a single string, we replicate
798 # it for each of the CPU models, substituting any
799 # %(CPU_foo)s params appropriately.
800 self.exec_output = expand_cpu_symbols_to_dict(exec_output)
801 self.decode_block = expand_cpu_symbols_to_string(decode_block)
802 self.has_decode_default = has_decode_default
803
804 # Override '+' operator: generate a new GenCode object that
805 # concatenates all the individual strings in the operands.
806 def __add__(self, other):
807 exec_output = {}
808 for cpu in cpu_models:
809 n = cpu.name
810 exec_output[n] = self.exec_output[n] + other.exec_output[n]
811 return GenCode(self.header_output + other.header_output,
812 self.decoder_output + other.decoder_output,
813 exec_output,
814 self.decode_block + other.decode_block,
815 self.has_decode_default or other.has_decode_default)
816
817 # Prepend a string (typically a comment) to all the strings.
818 def prepend_all(self, pre):
819 self.header_output = pre + self.header_output
820 self.decoder_output = pre + self.decoder_output
821 self.decode_block = pre + self.decode_block
822 for cpu in cpu_models:
823 self.exec_output[cpu.name] = pre + self.exec_output[cpu.name]
824
825 # Wrap the decode block in a pair of strings (e.g., 'case foo:'
826 # and 'break;'). Used to build the big nested switch statement.
827 def wrap_decode_block(self, pre, post = ''):
828 self.decode_block = pre + indent(self.decode_block) + post
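    # Rough illustration (block contents hypothetical): calling
    #   self.wrap_decode_block('case 0x1:\n', 'break;\n')
    # on a decode_block of 'return new Foo(machInst);\n' yields the
    # case label, the original line indented two spaces by indent(),
    # and then the trailing 'break;'.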
829
830################
831# Format object.
832#
833# A format object encapsulates an instruction format. It must provide
834# a defineInst() method that generates the code for an instruction
835# definition.
836
837exportContextSymbols = ('InstObjParams', 'makeList', 're', 'string')
838
839exportContext = {}
840
841def updateExportContext():
842 exportContext.update(exportDict(*exportContextSymbols))
843 exportContext.update(templateMap)
844
845def exportDict(*symNames):
846 return dict([(s, eval(s)) for s in symNames])
847
848
849class Format:
850 def __init__(self, id, params, code):
851 # constructor: just save away arguments
852 self.id = id
853 self.params = params
854 label = 'def format ' + id
855 self.user_code = compile(fixPythonIndentation(code), label, 'exec')
856 param_list = string.join(params, ", ")
857 f = '''def defInst(_code, _context, %s):
858 my_locals = vars().copy()
859 exec _code in _context, my_locals
860 return my_locals\n''' % param_list
861 c = compile(f, label + ' wrapper', 'exec')
862 exec c
863 self.func = defInst
864
865 def defineInst(self, name, args, lineno):
866 context = {}
867 updateExportContext()
868 context.update(exportContext)
869 if len(name):
870 Name = name[0].upper()
871 if len(name) > 1:
872 Name += name[1:]
873 context.update({ 'name': name, 'Name': Name })
870 try:
871 vars = self.func(self.user_code, context, *args[0], **args[1])
872 except Exception, exc:
873 error(lineno, 'error defining "%s": %s.' % (name, exc))
874 for k in vars.keys():
875 if k not in ('header_output', 'decoder_output',
876 'exec_output', 'decode_block'):
877 del vars[k]
878 return GenCode(**vars)
879
880# Special null format to catch an implicit-format instruction
881# definition outside of any format block.
882class NoFormat:
883 def __init__(self):
884 self.defaultInst = ''
885
886 def defineInst(self, name, args, lineno):
887 error(lineno,
888 'instruction definition "%s" with no active format!' % name)
889
890# This dictionary maps format name strings to Format objects.
891formatMap = {}
892
893# Define a new format
894def defFormat(id, params, code, lineno):
895 # make sure we haven't already defined this one
896 if formatMap.get(id, None) != None:
897 error(lineno, 'format %s redefined.' % id)
898 # create new object and store in global map
899 formatMap[id] = Format(id, params, code)
900
901
902##############
903# Stack: a simple stack object. Used for both formats (formatStack)
904# and default cases (defaultStack). Simply wraps a list to give more
905# stack-like syntax and enable initialization with an argument list
906# (as opposed to an argument that's a list).
907
908class Stack(list):
909 def __init__(self, *items):
910 list.__init__(self, items)
911
912 def push(self, item):
913 self.append(item);
914
915 def top(self):
916 return self[-1]
917
918# The global format stack.
919formatStack = Stack(NoFormat())
920
921# The global default case stack.
922defaultStack = Stack( None )
923
924# Global stack that tracks current file and line number.
925# Each element is a tuple (filename, lineno) that records the
926# *current* filename and the line number in the *previous* file where
927# it was included.
928fileNameStack = Stack()
929
930###################
931# Utility functions
932
933#
934# Indent every line in string 's' by two spaces
935# (except preprocessor directives).
936# Used to make nested code blocks look pretty.
937#
938def indent(s):
939 return re.sub(r'(?m)^(?!#)', ' ', s)
940
941#
942# Munge a somewhat arbitrarily formatted piece of Python code
943# (e.g. from a format 'let' block) into something whose indentation
944# will get by the Python parser.
945#
946# The two keys here are that Python will give a syntax error if
947# there's any whitespace at the beginning of the first line, and that
948# all lines at the same lexical nesting level must have identical
949# indentation. Unfortunately the way code literals work, an entire
950# let block tends to have some initial indentation. Rather than
951# trying to figure out what that is and strip it off, we prepend 'if
952# 1:' to make the let code the nested block inside the if (and have
953# the parser automatically deal with the indentation for us).
954#
955# We don't want to do this if (1) the code block is empty or (2) the
956# first line of the block doesn't have any whitespace at the front.
957
958def fixPythonIndentation(s):
959 # get rid of blank lines first
960 s = re.sub(r'(?m)^\s*\n', '', s);
961 if (s != '' and re.match(r'[ \t]', s[0])):
962 s = 'if 1:\n' + s
963 return s
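# Rough illustration: a let-block body arriving as
#   '    x = 1\n    y = x + 1\n'
# (uniformly indented) comes back as
#   'if 1:\n    x = 1\n    y = x + 1\n'
# while a body whose first line starts in column 0 is returned
# unchanged (apart from the blank-line removal).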
964
965# Error handler. Just call exit. Output formatted to work under
966# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
967# prints a Python stack backtrace too (can be handy when trying to
968# debug the parser itself).
969def error(lineno, string, print_traceback = False):
970 spaces = ""
971 for (filename, line) in fileNameStack[0:-1]:
972 print spaces + "In file included from " + filename + ":"
973 spaces += " "
974 # Print a Python stack backtrace if requested.
975 if (print_traceback):
976 traceback.print_exc()
977 if lineno != 0:
978 line_str = "%d:" % lineno
979 else:
980 line_str = ""
981 sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))
982
983
984#####################################################################
985#
986# Bitfield Operator Support
987#
988#####################################################################
989
990bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
991
992bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
993bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
994
995def substBitOps(code):
996 # first convert single-bit selectors to two-index form
997 # i.e., <n:> --> <n:n>
998 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
999 # simple case: selector applied to ID (name)
1000 # i.e., foo<a:b> --> bits(foo, a, b)
1001 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
1002 # if selector is applied to expression (ending in ')'),
1003 # we need to search backward for matching '('
1004 match = bitOpExprRE.search(code)
1005 while match:
1006 exprEnd = match.start()
1007 here = exprEnd - 1
1008 nestLevel = 1
1009 while nestLevel > 0:
1010 if code[here] == '(':
1011 nestLevel -= 1
1012 elif code[here] == ')':
1013 nestLevel += 1
1014 here -= 1
1015 if here < 0:
1016 sys.exit("Didn't find '('!")
1017 exprStart = here+1
1018 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
1019 match.group(1), match.group(2))
1020 code = code[:exprStart] + newExpr + code[match.end():]
1021 match = bitOpExprRE.search(code)
1022 return code
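# Rough illustration of the rewrites above (operand names are
# hypothetical):
#   substBitOps('Ra<7:0> + 1')          -> 'bits(Ra, 7, 0) + 1'
#   substBitOps('x = (Ra + Rb)<15:8>')  -> 'x = bits((Ra + Rb), 15, 8)'
# A single-bit selector written as '<5:>' is first expanded to '<5:5>'
# before either rewrite applies.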
1023
1024
1025####################
1026# Template objects.
1027#
1028# Template objects are format strings that allow substitution from
1029# the attribute spaces of other objects (e.g. InstObjParams instances).
1030
1031labelRE = re.compile(r'[^%]%\(([^\)]+)\)[sd]')
1032
1033class Template:
1034 def __init__(self, t):
1035 self.template = t
1036
1037 def subst(self, d):
1038 myDict = None
1039
1040 # Protect non-Python-dict substitutions (e.g. if there's a printf
1041 # in the templated C++ code)
1042 template = protect_non_subst_percents(self.template)
1043 # CPU-model-specific substitutions are handled later (in GenCode).
1044 template = protect_cpu_symbols(template)
1045
1046 # Build a dict ('myDict') to use for the template substitution.
1047 # Start with the template namespace. Make a copy since we're
1048 # going to modify it.
1049 myDict = templateMap.copy()
1050
1051 if isinstance(d, InstObjParams):
1052 # If we're dealing with an InstObjParams object, we need
1053 # to be a little more sophisticated. The instruction-wide
1054 # parameters are already formed, but the parameters which
1055 # are only function wide still need to be generated.
1056 compositeCode = ''
1057
1058 myDict.update(d.__dict__)
1059 # The "operands" and "snippets" attributes of the InstObjParams
1060 # objects are for internal use and not substitution.
1061 del myDict['operands']
1062 del myDict['snippets']
1063
1064 snippetLabels = [l for l in labelRE.findall(template)
1065 if d.snippets.has_key(l)]
1066
1067 snippets = dict([(s, mungeSnippet(d.snippets[s]))
1068 for s in snippetLabels])
1069
1070 myDict.update(snippets)
1071
1072 compositeCode = ' '.join(map(str, snippets.values()))
1073
1074 # Add in template itself in case it references any
1075 # operands explicitly (like Mem)
1076 compositeCode += ' ' + template
1077
1078 operands = SubOperandList(compositeCode, d.operands)
1079
1080 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
1081
1082 is_src = lambda op: op.is_src
1083 is_dest = lambda op: op.is_dest
1084
1085 myDict['op_src_decl'] = \
1086 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1087 myDict['op_dest_decl'] = \
1088 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1089
1090 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
1091 myDict['op_wb'] = operands.concatAttrStrings('op_wb')
1092
1093 if d.operands.memOperand:
1094 myDict['mem_acc_size'] = d.operands.memOperand.mem_acc_size
1095 myDict['mem_acc_type'] = d.operands.memOperand.mem_acc_type
1096
1097 elif isinstance(d, dict):
1098 # if the argument is a dictionary, we just use it.
1099 myDict.update(d)
1100 elif hasattr(d, '__dict__'):
1101 # if the argument is an object, we use its attribute map.
1102 myDict.update(d.__dict__)
1103 else:
1104 raise TypeError, "Template.subst() arg must be or have a dictionary"
1105 return template % myDict
1106
1107 # Convert to string. This handles the case when a template with a
1108 # CPU-specific term gets interpolated into another template or into
1109 # an output block.
1110 def __str__(self):
1111 return expand_cpu_symbols_to_string(self.template)
1112
1113#####################################################################
1114#
1115# Code Parser
1116#
1117# The remaining code is the support for automatically extracting
1118# instruction characteristics from pseudocode.
1119#
1120#####################################################################
1121
1122# Force the argument to be a list. Useful for flags, where a caller
1123# can specify a singleton flag or a list of flags. Also useful for
1124# converting tuples to lists so they can be modified.
1125def makeList(arg):
1126 if isinstance(arg, list):
1127 return arg
1128 elif isinstance(arg, tuple):
1129 return list(arg)
1130 elif not arg:
1131 return []
1132 else:
1133 return [ arg ]
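# Rough illustration:
#   makeList('IsLoad')              -> ['IsLoad']
#   makeList(('IsLoad', 'IsStore')) -> ['IsLoad', 'IsStore']
#   makeList(None)                  -> []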
1134
1135# Generate operandTypeMap from the user's 'def operand_types'
1136# statement.
1137def buildOperandTypeMap(userDict, lineno):
1138 global operandTypeMap
1139 operandTypeMap = {}
1140 for (ext, (desc, size)) in userDict.iteritems():
         ctype = ''  # reset each iteration so an unrecognized desc is caught below
1141 if desc == 'signed int':
1142 ctype = 'int%d_t' % size
1143 is_signed = 1
1144 elif desc == 'unsigned int':
1145 ctype = 'uint%d_t' % size
1146 is_signed = 0
1147 elif desc == 'float':
1148 is_signed = 1 # shouldn't really matter
1149 if size == 32:
1150 ctype = 'float'
1151 elif size == 64:
1152 ctype = 'double'
1153 elif desc == 'twin64 int':
1154 is_signed = 0
1155 ctype = 'Twin64_t'
1156 elif desc == 'twin32 int':
1157 is_signed = 0
1158 ctype = 'Twin32_t'
1159 if ctype == '':
1160 error(lineno, 'Unrecognized type description "%s" in userDict' % desc)
1161 operandTypeMap[ext] = (size, ctype, is_signed)
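# Rough illustration: a 'def operand_types' entry such as
#   'sw' : ('signed int', 32)
# (extension name chosen only for illustration) ends up as
#   operandTypeMap['sw'] == (32, 'int32_t', 1)
# i.e. a (size, ctype, is_signed) triple.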
1162
1163#
1164#
1165#
1166# Base class for operand descriptors. An instance of this class (or
1167# actually a class derived from this one) represents a specific
1168# operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
1169# derived classes encapsulate the traits of a particular operand type
1170# (e.g., "32-bit integer register").
1171#
1172class Operand(object):
1173 def __init__(self, full_name, ext, is_src, is_dest):
1174 self.full_name = full_name
1175 self.ext = ext
1176 self.is_src = is_src
1177 self.is_dest = is_dest
1178 # The 'effective extension' (eff_ext) is either the actual
1179 # extension, if one was explicitly provided, or the default.
1180 if ext:
1181 self.eff_ext = ext
1182 else:
1183 self.eff_ext = self.dflt_ext
1184
1185 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1186
1187 # note that mem_acc_size is undefined for non-mem operands...
1188 # template must be careful not to use it if it doesn't apply.
1189 if self.isMem():
1190 self.mem_acc_size = self.makeAccSize()
1191 if self.ctype in ['Twin32_t', 'Twin64_t']:
1192 self.mem_acc_type = 'Twin'
1193 else:
1194 self.mem_acc_type = 'uint'
1195
1196 # Finalize additional fields (primarily code fields). This step
1197 # is done separately since some of these fields may depend on the
1198 # register index enumeration that hasn't been performed yet at the
1199 # time of __init__().
1200 def finalize(self):
1201 self.flags = self.getFlags()
1202 self.constructor = self.makeConstructor()
1203 self.op_decl = self.makeDecl()
1204
1205 if self.is_src:
1206 self.op_rd = self.makeRead()
1207 self.op_src_decl = self.makeDecl()
1208 else:
1209 self.op_rd = ''
1210 self.op_src_decl = ''
1211
1212 if self.is_dest:
1213 self.op_wb = self.makeWrite()
1214 self.op_dest_decl = self.makeDecl()
1215 else:
1216 self.op_wb = ''
1217 self.op_dest_decl = ''
1218
1219 def isMem(self):
1220 return 0
1221
1222 def isReg(self):
1223 return 0
1224
1225 def isFloatReg(self):
1226 return 0
1227
1228 def isIntReg(self):
1229 return 0
1230
1231 def isControlReg(self):
1232 return 0
1233
1234 def getFlags(self):
1235 # note the empty slice '[:]' gives us a copy of self.flags[0]
1236 # instead of a reference to it
1237 my_flags = self.flags[0][:]
1238 if self.is_src:
1239 my_flags += self.flags[1]
1240 if self.is_dest:
1241 my_flags += self.flags[2]
1242 return my_flags
1243
1244 def makeDecl(self):
1245 # Note that initializations in the declarations are solely
1246 # to avoid 'uninitialized variable' errors from the compiler.
1247 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1248
1249class IntRegOperand(Operand):
1250 def isReg(self):
1251 return 1
1252
1253 def isIntReg(self):
1254 return 1
1255
1256 def makeConstructor(self):
1257 c = ''
1258 if self.is_src:
1259 c += '\n\t_srcRegIdx[%d] = %s;' % \
1260 (self.src_reg_idx, self.reg_spec)
1261 if self.is_dest:
1262 c += '\n\t_destRegIdx[%d] = %s;' % \
1263 (self.dest_reg_idx, self.reg_spec)
1264 return c
1265
1266 def makeRead(self):
1267 if (self.ctype == 'float' or self.ctype == 'double'):
1268 error(0, 'Attempt to read integer register as FP')
1269 if (self.size == self.dflt_size):
1270 return '%s = xc->readIntRegOperand(this, %d);\n' % \
1271 (self.base_name, self.src_reg_idx)
1272 elif (self.size > self.dflt_size):
1273 int_reg_val = 'xc->readIntRegOperand(this, %d)' % \
1274 (self.src_reg_idx)
1275 if (self.is_signed):
1276 int_reg_val = 'sext<%d>(%s)' % (self.dflt_size, int_reg_val)
1277 return '%s = %s;\n' % (self.base_name, int_reg_val)
1278 else:
1279 return '%s = bits(xc->readIntRegOperand(this, %d), %d, 0);\n' % \
1280 (self.base_name, self.src_reg_idx, self.size-1)
1281
1282 def makeWrite(self):
1283 if (self.ctype == 'float' or self.ctype == 'double'):
1284 error(0, 'Attempt to write integer register as FP')
1285 if (self.size != self.dflt_size and self.is_signed):
1286 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1287 else:
1288 final_val = self.base_name
1289 wb = '''
1290 {
1291 %s final_val = %s;
1292 xc->setIntRegOperand(this, %d, final_val);\n
1293 if (traceData) { traceData->setData(final_val); }
1294 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1295 return wb
1296
1297class FloatRegOperand(Operand):
1298 def isReg(self):
1299 return 1
1300
1301 def isFloatReg(self):
1302 return 1
1303
1304 def makeConstructor(self):
1305 c = ''
1306 if self.is_src:
1307 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1308 (self.src_reg_idx, self.reg_spec)
1309 if self.is_dest:
1310 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1311 (self.dest_reg_idx, self.reg_spec)
1312 return c
1313
1314 def makeRead(self):
1315 bit_select = 0
1316 width = 0;
1317 if (self.ctype == 'float'):
1318 func = 'readFloatRegOperand'
1319 width = 32;
1320 elif (self.ctype == 'double'):
1321 func = 'readFloatRegOperand'
1322 width = 64;
1323 else:
1324 func = 'readFloatRegOperandBits'
1325 if (self.ctype == 'uint32_t'):
1326 width = 32;
1327 elif (self.ctype == 'uint64_t'):
1328 width = 64;
1329 if (self.size != self.dflt_size):
1330 bit_select = 1
1331 if width:
1332 base = 'xc->%s(this, %d, %d)' % \
1333 (func, self.src_reg_idx, width)
1334 else:
1335 base = 'xc->%s(this, %d)' % \
1336 (func, self.src_reg_idx)
1337 if bit_select:
1338 return '%s = bits(%s, %d, 0);\n' % \
1339 (self.base_name, base, self.size-1)
1340 else:
1341 return '%s = %s;\n' % (self.base_name, base)
1342
1343 def makeWrite(self):
1344 final_val = self.base_name
1345 final_ctype = self.ctype
1346 widthSpecifier = ''
1347 width = 0
1348 if (self.ctype == 'float'):
1349 width = 32
1350 func = 'setFloatRegOperand'
1351 elif (self.ctype == 'double'):
1352 width = 64
1353 func = 'setFloatRegOperand'
1354 elif (self.ctype == 'uint32_t'):
1355 func = 'setFloatRegOperandBits'
1356 width = 32
1357 elif (self.ctype == 'uint64_t'):
1358 func = 'setFloatRegOperandBits'
1359 width = 64
1360 else:
1361 func = 'setFloatRegOperandBits'
1362 final_ctype = 'uint%d_t' % self.dflt_size
1363 if (self.size != self.dflt_size and self.is_signed):
1364 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1365 if width:
1366 widthSpecifier = ', %d' % width
1367 wb = '''
1368 {
1369 %s final_val = %s;
1370 xc->%s(this, %d, final_val%s);\n
1371 if (traceData) { traceData->setData(final_val); }
1372 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1373 widthSpecifier)
1374 return wb
1375
1376class ControlRegOperand(Operand):
1377 def isReg(self):
1378 return 1
1379
1380 def isControlReg(self):
1381 return 1
1382
1383 def makeConstructor(self):
1384 c = ''
1385 if self.is_src:
1386 c += '\n\t_srcRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1387 (self.src_reg_idx, self.reg_spec)
1388 if self.is_dest:
1389 c += '\n\t_destRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1390 (self.dest_reg_idx, self.reg_spec)
1391 return c
1392
1393 def makeRead(self):
1394 bit_select = 0
1395 if (self.ctype == 'float' or self.ctype == 'double'):
1396 error(0, 'Attempt to read control register as FP')
1397 base = 'xc->readMiscRegOperand(this, %s)' % self.src_reg_idx
1398 if self.size == self.dflt_size:
1399 return '%s = %s;\n' % (self.base_name, base)
1400 else:
1401 return '%s = bits(%s, %d, 0);\n' % \
1402 (self.base_name, base, self.size-1)
1403
1404 def makeWrite(self):
1405 if (self.ctype == 'float' or self.ctype == 'double'):
1406 error(0, 'Attempt to write control register as FP')
1407 wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
1408 (self.dest_reg_idx, self.base_name)
1409 wb += 'if (traceData) { traceData->setData(%s); }' % \
1410 self.base_name
1411 return wb
1412
1413class MemOperand(Operand):
1414 def isMem(self):
1415 return 1
1416
1417 def makeConstructor(self):
1418 return ''
1419
1420 def makeDecl(self):
1421 # Note that initializations in the declarations are solely
1422 # to avoid 'uninitialized variable' errors from the compiler.
1423 # Declare memory data variable.
1424 if self.ctype in ['Twin32_t','Twin64_t']:
1425 return "%s %s; %s.a = 0; %s.b = 0;\n" % (self.ctype, self.base_name,
1426 self.base_name, self.base_name)
1427 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1428 return c
1429
1430 def makeRead(self):
1431 return ''
1432
1433 def makeWrite(self):
1434 return ''
1435
1436 # Return the memory access size *in bits*, suitable for
1437 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1438 def makeAccSize(self):
1439 return self.size
1440
1441
1442class NPCOperand(Operand):
1443 def makeConstructor(self):
1444 return ''
1445
1446 def makeRead(self):
1447 return '%s = xc->readNextPC();\n' % self.base_name
1448
1449 def makeWrite(self):
1450 return 'xc->setNextPC(%s);\n' % self.base_name
1451
1452class NNPCOperand(Operand):
1453 def makeConstructor(self):
1454 return ''
1455
1456 def makeRead(self):
1457 return '%s = xc->readNextNPC();\n' % self.base_name
1458
1459 def makeWrite(self):
1460 return 'xc->setNextNPC(%s);\n' % self.base_name
1461
1462def buildOperandNameMap(userDict, lineno):
1463 global operandNameMap
1464 operandNameMap = {}
1465 for (op_name, val) in userDict.iteritems():
1466 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1467 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1468 # Canonical flag structure is a triple of lists, where each list
1469 # indicates the set of flags implied by this operand always, when
1470 # used as a source, and when used as a dest, respectively.
1471 # For simplicity this can be initialized using a variety of fairly
1472 # obvious shortcuts; we convert these to canonical form here.
1473 if not flags:
1474 # no flags specified (e.g., 'None')
1475 flags = ( [], [], [] )
1476 elif isinstance(flags, str):
1477 # a single flag: assumed to be unconditional
1478 flags = ( [ flags ], [], [] )
1479 elif isinstance(flags, list):
1480 # a list of flags: also assumed to be unconditional
1481 flags = ( flags, [], [] )
1482 elif isinstance(flags, tuple):
1483 # it's a tuple: it should be a triple,
1484 # but each item could be a single string or a list
1485 (uncond_flags, src_flags, dest_flags) = flags
1486 flags = (makeList(uncond_flags),
1487 makeList(src_flags), makeList(dest_flags))
1488 # Accumulate attributes of new operand class in tmp_dict
1489 tmp_dict = {}
1490 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1491 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1492 tmp_dict[attr] = eval(attr)
1493 tmp_dict['base_name'] = op_name
1494 # New class name will be e.g. "IntReg_Ra"
1495 cls_name = base_cls_name + '_' + op_name
1496 # Evaluate string arg to get class object. Note that the
1497 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1498 # have to append "Operand".
1499 try:
1500 base_cls = eval(base_cls_name + 'Operand')
1501 except NameError:
1502 error(lineno,
1503 'error: unknown operand base class "%s"' % base_cls_name)
1504 # The following statement creates a new class called
1505 # <cls_name> as a subclass of <base_cls> with the attributes
1506 # in tmp_dict, just as if we evaluated a class declaration.
1507 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
1508
1509 # Define operand variables.
1510 operands = userDict.keys()
1511
1512 operandsREString = (r'''
1513 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1514 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1515 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1516 '''
1517 % string.join(operands, '|'))
1518
1519 global operandsRE
1520 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
1521
1522 # Same as operandsREString, but extension is mandatory, and only two
1523 # groups are returned (base and ext, not full name as above).
1524 # Used for substituting '_' for '.' to make C++ identifiers.
1525 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1526 % string.join(operands, '|'))
1527
1528 global operandsWithExtRE
1529 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1530
1531
1532class OperandList:
1533
1534 # Find all the operands in the given code block. Returns an operand
1535 # descriptor list (instance of class OperandList).
1536 def __init__(self, code):
1537 self.items = []
1538 self.bases = {}
1539 # delete comments so we don't match on reg specifiers inside
1540 code = commentRE.sub('', code)
1541 # search for operands
1542 next_pos = 0
1543 while 1:
1544 match = operandsRE.search(code, next_pos)
1545 if not match:
1546 # no more matches: we're done
1547 break
1548 op = match.groups()
1549 # regexp groups are operand full name, base, and extension
1550 (op_full, op_base, op_ext) = op
1551 # if the token following the operand is an assignment, this is
1552 # a destination (LHS), else it's a source (RHS)
1553 is_dest = (assignRE.match(code, match.end()) != None)
1554 is_src = not is_dest
1555 # see if we've already seen this one
1556 op_desc = self.find_base(op_base)
1557 if op_desc:
1558 if op_desc.ext != op_ext:
1559 error(0, 'Inconsistent extensions for operand %s' % \
1560 op_base)
1561 op_desc.is_src = op_desc.is_src or is_src
1562 op_desc.is_dest = op_desc.is_dest or is_dest
1563 else:
1564 # new operand: create new descriptor
1565 op_desc = operandNameMap[op_base](op_full, op_ext,
1566 is_src, is_dest)
1567 self.append(op_desc)
1568 # start next search after end of current match
1569 next_pos = match.end()
1570 self.sort()
1571 # enumerate source & dest register operands... used in building
1572 # constructor later
1573 self.numSrcRegs = 0
1574 self.numDestRegs = 0
1575 self.numFPDestRegs = 0
1576 self.numIntDestRegs = 0
1577 self.memOperand = None
1578 for op_desc in self.items:
1579 if op_desc.isReg():
1580 if op_desc.is_src:
1581 op_desc.src_reg_idx = self.numSrcRegs
1582 self.numSrcRegs += 1
1583 if op_desc.is_dest:
1584 op_desc.dest_reg_idx = self.numDestRegs
1585 self.numDestRegs += 1
1586 if op_desc.isFloatReg():
1587 self.numFPDestRegs += 1
1588 elif op_desc.isIntReg():
1589 self.numIntDestRegs += 1
1590 elif op_desc.isMem():
1591 if self.memOperand:
1592 error(0, "Code block has more than one memory operand.")
1593 self.memOperand = op_desc
1594 # now make a final pass to finalize op_desc fields that may depend
1595 # on the register enumeration
1596 for op_desc in self.items:
1597 op_desc.finalize()
1598
1599 def __len__(self):
1600 return len(self.items)
1601
1602 def __getitem__(self, index):
1603 return self.items[index]
1604
1605 def append(self, op_desc):
1606 self.items.append(op_desc)
1607 self.bases[op_desc.base_name] = op_desc
1608
1609 def find_base(self, base_name):
1610 # like self.bases[base_name], but returns None if not found
1611 # (rather than raising exception)
1612 return self.bases.get(base_name)
1613
1614 # internal helper function for concat[Some]Attr{Strings|Lists}
1615 def __internalConcatAttrs(self, attr_name, filter, result):
1616 for op_desc in self.items:
1617 if filter(op_desc):
1618 result += getattr(op_desc, attr_name)
1619 return result
1620
1621 # return a single string that is the concatenation of the (string)
1622 # values of the specified attribute for all operands
1623 def concatAttrStrings(self, attr_name):
1624 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1625
1626 # like concatAttrStrings, but only include the values for the operands
1627 # for which the provided filter function returns true
1628 def concatSomeAttrStrings(self, filter, attr_name):
1629 return self.__internalConcatAttrs(attr_name, filter, '')
1630
1631 # return a single list that is the concatenation of the (list)
1632 # values of the specified attribute for all operands
1633 def concatAttrLists(self, attr_name):
1634 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1635
1636 # like concatAttrLists, but only include the values for the operands
1637 # for which the provided filter function returns true
1638 def concatSomeAttrLists(self, filter, attr_name):
1639 return self.__internalConcatAttrs(attr_name, filter, [])
1640
1641 def sort(self):
1642 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1643
1644class SubOperandList(OperandList):
1645
1646 # Find all the operands in the given code block. Returns an operand
1647 # descriptor list (instance of class OperandList).
1648 def __init__(self, code, master_list):
1649 self.items = []
1650 self.bases = {}
1651 # delete comments so we don't match on reg specifiers inside
1652 code = commentRE.sub('', code)
1653 # search for operands
1654 next_pos = 0
1655 while 1:
1656 match = operandsRE.search(code, next_pos)
1657 if not match:
1658 # no more matches: we're done
1659 break
1660 op = match.groups()
1661 # regexp groups are operand full name, base, and extension
1662 (op_full, op_base, op_ext) = op
1663 # find this op in the master list
1664 op_desc = master_list.find_base(op_base)
1665 if not op_desc:
1666 error(0, 'Found operand %s which is not in the master list!' \
1667 ' This is an internal error' % \
1668 op_base)
1669 else:
1670 # See if we've already found this operand
1671 op_desc = self.find_base(op_base)
1672 if not op_desc:
1673 # if not, add a reference to it to this sub list
1674 self.append(master_list.bases[op_base])
1675
1676 # start next search after end of current match
1677 next_pos = match.end()
1678 self.sort()
1679 self.memOperand = None
1680 for op_desc in self.items:
1681 if op_desc.isMem():
1682 if self.memOperand:
1683 error(0, "Code block has more than one memory operand.")
1684 self.memOperand = op_desc
1685
1686# Regular expression object to match C++ comments
1687# (used in findOperands())
1688commentRE = re.compile(r'//.*\n')
1689
1690# Regular expression object to match assignment statements
1691# (used in findOperands())
1692assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
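# For example (operand names hypothetical): in 'Rc = Ra + Rb', the '='
# right after 'Rc' matches assignRE, so Rc is classified as a
# destination, while the (?!=) lookahead keeps a comparison such as
# 'Ra == 0' from being treated as an assignment.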
1693
1694# Munge operand names in code string to make legal C++ variable names.
1695# This means getting rid of the type extension if any.
1696# (Will match base_name attribute of Operand object.)
1697def substMungedOpNames(code):
1698 return operandsWithExtRE.sub(r'\1', code)
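# Rough illustration (assuming 'Ra' and 'Rc' are names defined via
# 'def operands'):
#   substMungedOpNames('Rc.sw = Ra.sw + 2;') -> 'Rc = Ra + 2;'
# i.e. the '.sw' extensions are dropped so the names match the
# base_name attribute used in the generated C++.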
1699
1700# Fix up code snippets for final substitution in templates.
1701def mungeSnippet(s):
1702 if isinstance(s, str):
1703 return substMungedOpNames(substBitOps(s))
1704 else:
1705 return s
1706
1707def makeFlagConstructor(flag_list):
1708 if len(flag_list) == 0:
1709 return ''
1710 # filter out repeated flags
1711 flag_list.sort()
1712 i = 1
1713 while i < len(flag_list):
1714 if flag_list[i] == flag_list[i-1]:
1715 del flag_list[i]
1716 else:
1717 i += 1
1718 pre = '\n\tflags['
1719 post = '] = true;'
1720 code = pre + string.join(flag_list, post + pre) + post
1721 return code
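# Rough illustration (flag names chosen only for this sketch):
#   makeFlagConstructor(['IsInteger', 'IsLoad', 'IsLoad'])
# sorts and de-duplicates the list and returns
#   '\n\tflags[IsInteger] = true;\n\tflags[IsLoad] = true;'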
1722
1723# Assume all instruction flags are of the form 'IsFoo'
1724instFlagRE = re.compile(r'Is.*')
1725
1726# OpClass constants end in 'Op' except No_OpClass
1727opClassRE = re.compile(r'.*Op|No_OpClass')
1728
1729class InstObjParams:
1730 def __init__(self, mnem, class_name, base_class = '',
1731 snippets = {}, opt_args = []):
1732 self.mnemonic = mnem
1733 self.class_name = class_name
1734 self.base_class = base_class
1735 if not isinstance(snippets, dict):
1736 snippets = {'code' : snippets}
1737 compositeCode = ' '.join(map(str, snippets.values()))
1738 self.snippets = snippets
1739
1740 self.operands = OperandList(compositeCode)
1741 self.constructor = self.operands.concatAttrStrings('constructor')
1742 self.constructor += \
1743 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1744 self.constructor += \
1745 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1746 self.constructor += \
1747 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1748 self.constructor += \
1749 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1750 self.flags = self.operands.concatAttrLists('flags')
1751
1752 # Make a basic guess on the operand class (function unit type).
1753 # These are good enough for most cases, and can be overridden
1754 # later otherwise.
1755 if 'IsStore' in self.flags:
1756 self.op_class = 'MemWriteOp'
1757 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1758 self.op_class = 'MemReadOp'
1759 elif 'IsFloating' in self.flags:
1760 self.op_class = 'FloatAddOp'
1761 else:
1762 self.op_class = 'IntAluOp'
1763
1764 # Optional arguments are assumed to be either StaticInst flags
1765 # or an OpClass value. To avoid having to import a complete
1766 # list of these values to match against, we do it ad-hoc
1767 # with regexps.
1768 for oa in opt_args:
1769 if instFlagRE.match(oa):
1770 self.flags.append(oa)
1771 elif opClassRE.match(oa):
1772 self.op_class = oa
1773 else:
1774 error(0, 'InstObjParams: optional arg "%s" not recognized '
1775 'as StaticInst::Flag or OpClass.' % oa)
1776
1777 # add flag initialization to constructor here to include
1778 # any flags added via opt_args
1779 self.constructor += makeFlagConstructor(self.flags)
1780
1781 # if 'IsFloating' is set, add call to the FP enable check
1782 # function (which should be provided by isa_desc via a declare)
1783 if 'IsFloating' in self.flags:
1784 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1785 else:
1786 self.fp_enable_check = ''
1787
1788#######################
1789#
1790# Output file template
1791#
1792
1793file_template = '''
1794/*
1795 * DO NOT EDIT THIS FILE!!!
1796 *
1797 * It was automatically generated from the ISA description in %(filename)s
1798 */
1799
1800%(includes)s
1801
1802%(global_output)s
1803
1804namespace %(namespace)s {
1805
1806%(namespace_output)s
1807
1808} // namespace %(namespace)s
1809
1810%(decode_function)s
1811'''
1812
1813
1814# Update the output file only if the new contents are different from
1815# the current contents. Minimizes the files that need to be rebuilt
1816# after minor changes.
1817def update_if_needed(file, contents):
1818 update = False
1819 if os.access(file, os.R_OK):
1820 f = open(file, 'r')
1821 old_contents = f.read()
1822 f.close()
1823 if contents != old_contents:
1824 print 'Updating', file
1825 os.remove(file) # in case it's write-protected
1826 update = True
1827 else:
1828 print 'File', file, 'is unchanged'
1829 else:
1830 print 'Generating', file
1831 update = True
1832 if update:
1833 f = open(file, 'w')
1834 f.write(contents)
1835 f.close()
1836
1837# This regular expression matches '##include' directives
1838includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1839 re.MULTILINE)
1840
1841# Function to replace a matched '##include' directive with the
1842# contents of the specified file (with nested ##includes replaced
1843# recursively). 'matchobj' is an re match object (from a match of
1844# includeRE) and 'dirname' is the directory relative to which the file
1845# path should be resolved.
1846def replace_include(matchobj, dirname):
1847 fname = matchobj.group('filename')
1848 full_fname = os.path.normpath(os.path.join(dirname, fname))
1849 contents = '##newfile "%s"\n%s\n##endfile\n' % \
1850 (full_fname, read_and_flatten(full_fname))
1851 return contents
1852
1853# Read a file and recursively flatten nested '##include' files.
1854def read_and_flatten(filename):
1855 current_dir = os.path.dirname(filename)
1856 try:
1857 contents = open(filename).read()
1858 except IOError:
1859 error(0, 'Error including file "%s"' % filename)
1860 fileNameStack.push((filename, 0))
1861 # Find any includes and include them
1862 contents = includeRE.sub(lambda m: replace_include(m, current_dir),
1863 contents)
1864 fileNameStack.pop()
1865 return contents
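# Rough illustration (file name hypothetical): a line
#   ##include "decoder.isa"
# in the top-level description is replaced in the flattened string by
#   ##newfile "<path resolved relative to the including file>"
#   ...recursively flattened contents of decoder.isa...
#   ##endfile
# exactly as built by replace_include() above.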
1866
1867#
1868# Read in and parse the ISA description.
1869#
1870def parse_isa_desc(isa_desc_file, output_dir):
1871 # Read file and (recursively) all included files into a string.
1872 # PLY requires that the input be in a single string so we have to
1873 # do this up front.
1874 isa_desc = read_and_flatten(isa_desc_file)
1875
1876 # Initialize filename stack with outer file.
1877 fileNameStack.push((isa_desc_file, 0))
1878
1879 # Parse it.
1880 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1881
1882 # grab the last three path components of isa_desc_file to put in
1883 # the output
1884 filename = '/'.join(isa_desc_file.split('/')[-3:])
1885
1886 # generate decoder.hh
1887 includes = '#include "base/bitfield.hh" // for bitfield support'
1888 global_output = global_code.header_output
1889 namespace_output = namespace_code.header_output
1890 decode_function = ''
1891 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1892
1893 # generate decoder.cc
1894 includes = '#include "decoder.hh"'
1895 global_output = global_code.decoder_output
1896 namespace_output = namespace_code.decoder_output
1897 # namespace_output += namespace_code.decode_block
1898 decode_function = namespace_code.decode_block
1899 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1900
1901 # generate per-cpu exec files
1902 for cpu in cpu_models:
1903 includes = '#include "decoder.hh"\n'
1904 includes += cpu.includes
1905 global_output = global_code.exec_output[cpu.name]
1906 namespace_output = namespace_code.exec_output[cpu.name]
1907 decode_function = ''
1908 update_if_needed(output_dir + '/' + cpu.filename,
1909 file_template % vars())
1910
1911# global list of CpuModel objects (see cpu_models.py)
1912cpu_models = []
1913
1914# Called as script: get args from command line.
1915# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
1916if __name__ == '__main__':
1917 execfile(sys.argv[1]) # read in CpuModel definitions
1918 cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
1919 parse_isa_desc(sys.argv[2], sys.argv[3])
874 try:
875 vars = self.func(self.user_code, context, *args[0], **args[1])
876 except Exception, exc:
877 error(lineno, 'error defining "%s": %s.' % (name, exc))
878 for k in vars.keys():
879 if k not in ('header_output', 'decoder_output',
880 'exec_output', 'decode_block'):
881 del vars[k]
882 return GenCode(**vars)
883
884# Special null format to catch an implicit-format instruction
885# definition outside of any format block.
886class NoFormat:
887 def __init__(self):
888 self.defaultInst = ''
889
890 def defineInst(self, name, args, lineno):
891 error(lineno,
892 'instruction definition "%s" with no active format!' % name)
893
894# This dictionary maps format name strings to Format objects.
895formatMap = {}
896
897# Define a new format
898def defFormat(id, params, code, lineno):
899 # make sure we haven't already defined this one
900 if formatMap.get(id, None) != None:
901 error(lineno, 'format %s redefined.' % id)
902 # create new object and store in global map
903 formatMap[id] = Format(id, params, code)
904
905
906##############
907# Stack: a simple stack object. Used for both formats (formatStack)
908# and default cases (defaultStack). Simply wraps a list to give more
909# stack-like syntax and enable initialization with an argument list
910# (as opposed to an argument that's a list).
911
912class Stack(list):
913 def __init__(self, *items):
914 list.__init__(self, items)
915
916 def push(self, item):
917 self.append(item);
918
919 def top(self):
920 return self[-1]
921
922# The global format stack.
923formatStack = Stack(NoFormat())
924
925# The global default case stack.
926defaultStack = Stack( None )
927
928# Global stack that tracks current file and line number.
929# Each element is a tuple (filename, lineno) that records the
930# *current* filename and the line number in the *previous* file where
931# it was included.
932fileNameStack = Stack()
933
934###################
935# Utility functions
936
937#
938# Indent every line in string 's' by two spaces
939# (except preprocessor directives).
940# Used to make nested code blocks look pretty.
941#
942def indent(s):
943 return re.sub(r'(?m)^(?!#)', ' ', s)
944
945#
946# Munge a somewhat arbitrarily formatted piece of Python code
947# (e.g. from a format 'let' block) into something whose indentation
948# will get by the Python parser.
949#
950# The two keys here are that Python will give a syntax error if
951# there's any whitespace at the beginning of the first line, and that
952# all lines at the same lexical nesting level must have identical
953# indentation. Unfortunately the way code literals work, an entire
954# let block tends to have some initial indentation. Rather than
955# trying to figure out what that is and strip it off, we prepend 'if
956# 1:' to make the let code the nested block inside the if (and have
957# the parser automatically deal with the indentation for us).
958#
959# We don't want to do this if (1) the code block is empty or (2) the
960# first line of the block doesn't have any whitespace at the front.
961
962def fixPythonIndentation(s):
963 # get rid of blank lines first
964 s = re.sub(r'(?m)^\s*\n', '', s);
965 if (s != '' and re.match(r'[ \t]', s[0])):
966 s = 'if 1:\n' + s
967 return s
968
969# Error handler. Just call exit. Output formatted to work under
970# Emacs compile-mode. Optional 'print_traceback' arg, if set to True,
971# prints a Python stack backtrace too (can be handy when trying to
972# debug the parser itself).
973def error(lineno, string, print_traceback = False):
974 spaces = ""
975 for (filename, line) in fileNameStack[0:-1]:
976 print spaces + "In file included from " + filename + ":"
977 spaces += " "
978 # Print a Python stack backtrace if requested.
979 if (print_traceback):
980 traceback.print_exc()
981 if lineno != 0:
982 line_str = "%d:" % lineno
983 else:
984 line_str = ""
985 sys.exit(spaces + "%s:%s %s" % (fileNameStack[-1][0], line_str, string))
986
987
988#####################################################################
989#
990# Bitfield Operator Support
991#
992#####################################################################
993
994bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
995
996bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
997bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
998
999def substBitOps(code):
1000 # first convert single-bit selectors to two-index form
1001 # i.e., <n> --> <n:n>
1002 code = bitOp1ArgRE.sub(r'<\1:\1>', code)
1003 # simple case: selector applied to ID (name)
1004 # i.e., foo<a:b> --> bits(foo, a, b)
1005 code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
1006 # if selector is applied to expression (ending in ')'),
1007 # we need to search backward for matching '('
1008 match = bitOpExprRE.search(code)
1009 while match:
1010 exprEnd = match.start()
1011 here = exprEnd - 1
1012 nestLevel = 1
1013 while nestLevel > 0:
1014 if code[here] == '(':
1015 nestLevel -= 1
1016 elif code[here] == ')':
1017 nestLevel += 1
1018 here -= 1
1019 if here < 0:
1020 sys.exit("Didn't find '('!")
1021 exprStart = here+1
1022 newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
1023 match.group(1), match.group(2))
1024 code = code[:exprStart] + newExpr + code[match.end():]
1025 match = bitOpExprRE.search(code)
1026 return code
1027
1028
1029####################
1030# Template objects.
1031#
1032# Template objects are format strings that allow substitution from
1033# the attribute spaces of other objects (e.g. InstObjParams instances).
1034
1035labelRE = re.compile(r'[^%]%\(([^\)]+)\)[sd]')
1036
1037class Template:
1038 def __init__(self, t):
1039 self.template = t
1040
1041 def subst(self, d):
1042 myDict = None
1043
1044 # Protect non-Python-dict substitutions (e.g. if there's a printf
1045 # in the templated C++ code)
1046 template = protect_non_subst_percents(self.template)
1047 # CPU-model-specific substitutions are handled later (in GenCode).
1048 template = protect_cpu_symbols(template)
1049
1050 # Build a dict ('myDict') to use for the template substitution.
1051 # Start with the template namespace. Make a copy since we're
1052 # going to modify it.
1053 myDict = templateMap.copy()
1054
1055 if isinstance(d, InstObjParams):
1056 # If we're dealing with an InstObjParams object, we need
1057 # to be a little more sophisticated. The instruction-wide
1058 # parameters are already formed, but the parameters which
1059 # are only function wide still need to be generated.
1060 compositeCode = ''
1061
1062 myDict.update(d.__dict__)
1063 # The "operands" and "snippets" attributes of the InstObjParams
1064 # objects are for internal use and not substitution.
1065 del myDict['operands']
1066 del myDict['snippets']
1067
1068 snippetLabels = [l for l in labelRE.findall(template)
1069 if d.snippets.has_key(l)]
1070
1071 snippets = dict([(s, mungeSnippet(d.snippets[s]))
1072 for s in snippetLabels])
1073
1074 myDict.update(snippets)
1075
1076 compositeCode = ' '.join(map(str, snippets.values()))
1077
1078 # Add in template itself in case it references any
1079 # operands explicitly (like Mem)
1080 compositeCode += ' ' + template
1081
1082 operands = SubOperandList(compositeCode, d.operands)
1083
1084 myDict['op_decl'] = operands.concatAttrStrings('op_decl')
1085
1086 is_src = lambda op: op.is_src
1087 is_dest = lambda op: op.is_dest
1088
1089 myDict['op_src_decl'] = \
1090 operands.concatSomeAttrStrings(is_src, 'op_src_decl')
1091 myDict['op_dest_decl'] = \
1092 operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
1093
1094 myDict['op_rd'] = operands.concatAttrStrings('op_rd')
1095 myDict['op_wb'] = operands.concatAttrStrings('op_wb')
1096
1097 if d.operands.memOperand:
1098 myDict['mem_acc_size'] = d.operands.memOperand.mem_acc_size
1099 myDict['mem_acc_type'] = d.operands.memOperand.mem_acc_type
1100
1101 elif isinstance(d, dict):
1102 # if the argument is a dictionary, we just use it.
1103 myDict.update(d)
1104 elif hasattr(d, '__dict__'):
1105 # if the argument is an object, we use its attribute map.
1106 myDict.update(d.__dict__)
1107 else:
1108 raise TypeError, "Template.subst() arg must be or have dictionary"
1109 return template % myDict
1110
1111 # Convert to string. This handles the case when a template with a
1112 # CPU-specific term gets interpolated into another template or into
1113 # an output block.
1114 def __str__(self):
1115 return expand_cpu_symbols_to_string(self.template)
1116
1117#####################################################################
1118#
1119# Code Parser
1120#
1121# The remaining code is the support for automatically extracting
1122# instruction characteristics from pseudocode.
1123#
1124#####################################################################
1125
1126# Force the argument to be a list. Useful for flags, where a caller
1127# can specify a singleton flag or a list of flags. Also usful for
1128# converting tuples to lists so they can be modified.
1129def makeList(arg):
1130 if isinstance(arg, list):
1131 return arg
1132 elif isinstance(arg, tuple):
1133 return list(arg)
1134 elif not arg:
1135 return []
1136 else:
1137 return [ arg ]
1138
1139# Generate operandTypeMap from the user's 'def operand_types'
1140# statement.
1141def buildOperandTypeMap(userDict, lineno):
1142 global operandTypeMap
1143 operandTypeMap = {}
1144 for (ext, (desc, size)) in userDict.iteritems():
1145 if desc == 'signed int':
1146 ctype = 'int%d_t' % size
1147 is_signed = 1
1148 elif desc == 'unsigned int':
1149 ctype = 'uint%d_t' % size
1150 is_signed = 0
1151 elif desc == 'float':
1152 is_signed = 1 # shouldn't really matter
1153 if size == 32:
1154 ctype = 'float'
1155 elif size == 64:
1156 ctype = 'double'
1157 elif desc == 'twin64 int':
1158 is_signed = 0
1159 ctype = 'Twin64_t'
1160 elif desc == 'twin32 int':
1161 is_signed = 0
1162 ctype = 'Twin32_t'
1163 if ctype == '':
1164 error(lineno, 'Unrecognized type description "%s" in userDict')
1165 operandTypeMap[ext] = (size, ctype, is_signed)
1166
1167#
1168#
1169#
1170# Base class for operand descriptors. An instance of this class (or
1171# actually a class derived from this one) represents a specific
1172# operand for a code block (e.g, "Rc.sq" as a dest). Intermediate
1173# derived classes encapsulates the traits of a particular operand type
1174# (e.g., "32-bit integer register").
1175#
1176class Operand(object):
1177 def __init__(self, full_name, ext, is_src, is_dest):
1178 self.full_name = full_name
1179 self.ext = ext
1180 self.is_src = is_src
1181 self.is_dest = is_dest
1182 # The 'effective extension' (eff_ext) is either the actual
1183 # extension, if one was explicitly provided, or the default.
1184 if ext:
1185 self.eff_ext = ext
1186 else:
1187 self.eff_ext = self.dflt_ext
1188
1189 (self.size, self.ctype, self.is_signed) = operandTypeMap[self.eff_ext]
1190
1191 # note that mem_acc_size is undefined for non-mem operands...
1192 # template must be careful not to use it if it doesn't apply.
1193 if self.isMem():
1194 self.mem_acc_size = self.makeAccSize()
1195 if self.ctype in ['Twin32_t', 'Twin64_t']:
1196 self.mem_acc_type = 'Twin'
1197 else:
1198 self.mem_acc_type = 'uint'
1199
1200 # Finalize additional fields (primarily code fields). This step
1201 # is done separately since some of these fields may depend on the
1202 # register index enumeration that hasn't been performed yet at the
1203 # time of __init__().
1204 def finalize(self):
1205 self.flags = self.getFlags()
1206 self.constructor = self.makeConstructor()
1207 self.op_decl = self.makeDecl()
1208
1209 if self.is_src:
1210 self.op_rd = self.makeRead()
1211 self.op_src_decl = self.makeDecl()
1212 else:
1213 self.op_rd = ''
1214 self.op_src_decl = ''
1215
1216 if self.is_dest:
1217 self.op_wb = self.makeWrite()
1218 self.op_dest_decl = self.makeDecl()
1219 else:
1220 self.op_wb = ''
1221 self.op_dest_decl = ''
1222
1223 def isMem(self):
1224 return 0
1225
1226 def isReg(self):
1227 return 0
1228
1229 def isFloatReg(self):
1230 return 0
1231
1232 def isIntReg(self):
1233 return 0
1234
1235 def isControlReg(self):
1236 return 0
1237
1238 def getFlags(self):
1239 # note the empty slice '[:]' gives us a copy of self.flags[0]
1240 # instead of a reference to it
1241 my_flags = self.flags[0][:]
1242 if self.is_src:
1243 my_flags += self.flags[1]
1244 if self.is_dest:
1245 my_flags += self.flags[2]
1246 return my_flags
1247
1248 def makeDecl(self):
1249 # Note that initializations in the declarations are solely
1250 # to avoid 'uninitialized variable' errors from the compiler.
1251 return self.ctype + ' ' + self.base_name + ' = 0;\n';
1252
1253class IntRegOperand(Operand):
1254 def isReg(self):
1255 return 1
1256
1257 def isIntReg(self):
1258 return 1
1259
1260 def makeConstructor(self):
1261 c = ''
1262 if self.is_src:
1263 c += '\n\t_srcRegIdx[%d] = %s;' % \
1264 (self.src_reg_idx, self.reg_spec)
1265 if self.is_dest:
1266 c += '\n\t_destRegIdx[%d] = %s;' % \
1267 (self.dest_reg_idx, self.reg_spec)
1268 return c
1269
1270 def makeRead(self):
1271 if (self.ctype == 'float' or self.ctype == 'double'):
1272 error(0, 'Attempt to read integer register as FP')
1273 if (self.size == self.dflt_size):
1274 return '%s = xc->readIntRegOperand(this, %d);\n' % \
1275 (self.base_name, self.src_reg_idx)
1276 elif (self.size > self.dflt_size):
1277 int_reg_val = 'xc->readIntRegOperand(this, %d)' % \
1278 (self.src_reg_idx)
1279 if (self.is_signed):
1280 int_reg_val = 'sext<%d>(%s)' % (self.dflt_size, int_reg_val)
1281 return '%s = %s;\n' % (self.base_name, int_reg_val)
1282 else:
1283 return '%s = bits(xc->readIntRegOperand(this, %d), %d, 0);\n' % \
1284 (self.base_name, self.src_reg_idx, self.size-1)
1285
1286 def makeWrite(self):
1287 if (self.ctype == 'float' or self.ctype == 'double'):
1288 error(0, 'Attempt to write integer register as FP')
1289 if (self.size != self.dflt_size and self.is_signed):
1290 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1291 else:
1292 final_val = self.base_name
1293 wb = '''
1294 {
1295 %s final_val = %s;
1296 xc->setIntRegOperand(this, %d, final_val);\n
1297 if (traceData) { traceData->setData(final_val); }
1298 }''' % (self.dflt_ctype, final_val, self.dest_reg_idx)
1299 return wb
1300
1301class FloatRegOperand(Operand):
1302 def isReg(self):
1303 return 1
1304
1305 def isFloatReg(self):
1306 return 1
1307
1308 def makeConstructor(self):
1309 c = ''
1310 if self.is_src:
1311 c += '\n\t_srcRegIdx[%d] = %s + FP_Base_DepTag;' % \
1312 (self.src_reg_idx, self.reg_spec)
1313 if self.is_dest:
1314 c += '\n\t_destRegIdx[%d] = %s + FP_Base_DepTag;' % \
1315 (self.dest_reg_idx, self.reg_spec)
1316 return c
1317
1318 def makeRead(self):
1319 bit_select = 0
1320 width = 0;
1321 if (self.ctype == 'float'):
1322 func = 'readFloatRegOperand'
1323 width = 32;
1324 elif (self.ctype == 'double'):
1325 func = 'readFloatRegOperand'
1326 width = 64;
1327 else:
1328 func = 'readFloatRegOperandBits'
1329 if (self.ctype == 'uint32_t'):
1330 width = 32;
1331 elif (self.ctype == 'uint64_t'):
1332 width = 64;
1333 if (self.size != self.dflt_size):
1334 bit_select = 1
1335 if width:
1336 base = 'xc->%s(this, %d, %d)' % \
1337 (func, self.src_reg_idx, width)
1338 else:
1339 base = 'xc->%s(this, %d)' % \
1340 (func, self.src_reg_idx)
1341 if bit_select:
1342 return '%s = bits(%s, %d, 0);\n' % \
1343 (self.base_name, base, self.size-1)
1344 else:
1345 return '%s = %s;\n' % (self.base_name, base)
1346
1347 def makeWrite(self):
1348 final_val = self.base_name
1349 final_ctype = self.ctype
1350 widthSpecifier = ''
1351 width = 0
1352 if (self.ctype == 'float'):
1353 width = 32
1354 func = 'setFloatRegOperand'
1355 elif (self.ctype == 'double'):
1356 width = 64
1357 func = 'setFloatRegOperand'
1358 elif (self.ctype == 'uint32_t'):
1359 func = 'setFloatRegOperandBits'
1360 width = 32
1361 elif (self.ctype == 'uint64_t'):
1362 func = 'setFloatRegOperandBits'
1363 width = 64
1364 else:
1365 func = 'setFloatRegOperandBits'
1366 final_ctype = 'uint%d_t' % self.dflt_size
1367 if (self.size != self.dflt_size and self.is_signed):
1368 final_val = 'sext<%d>(%s)' % (self.size, self.base_name)
1369 if width:
1370 widthSpecifier = ', %d' % width
1371 wb = '''
1372 {
1373 %s final_val = %s;
1374 xc->%s(this, %d, final_val%s);\n
1375 if (traceData) { traceData->setData(final_val); }
1376 }''' % (final_ctype, final_val, func, self.dest_reg_idx,
1377 widthSpecifier)
1378 return wb
1379
1380class ControlRegOperand(Operand):
1381 def isReg(self):
1382 return 1
1383
1384 def isControlReg(self):
1385 return 1
1386
1387 def makeConstructor(self):
1388 c = ''
1389 if self.is_src:
1390 c += '\n\t_srcRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1391 (self.src_reg_idx, self.reg_spec)
1392 if self.is_dest:
1393 c += '\n\t_destRegIdx[%d] = %s + Ctrl_Base_DepTag;' % \
1394 (self.dest_reg_idx, self.reg_spec)
1395 return c
1396
1397 def makeRead(self):
1398 bit_select = 0
1399 if (self.ctype == 'float' or self.ctype == 'double'):
1400 error(0, 'Attempt to read control register as FP')
1401 base = 'xc->readMiscRegOperand(this, %s)' % self.src_reg_idx
1402 if self.size == self.dflt_size:
1403 return '%s = %s;\n' % (self.base_name, base)
1404 else:
1405 return '%s = bits(%s, %d, 0);\n' % \
1406 (self.base_name, base, self.size-1)
1407
1408 def makeWrite(self):
1409 if (self.ctype == 'float' or self.ctype == 'double'):
1410 error(0, 'Attempt to write control register as FP')
1411 wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
1412 (self.dest_reg_idx, self.base_name)
1413 wb += 'if (traceData) { traceData->setData(%s); }' % \
1414 self.base_name
1415 return wb
1416
1417class MemOperand(Operand):
1418 def isMem(self):
1419 return 1
1420
1421 def makeConstructor(self):
1422 return ''
1423
1424 def makeDecl(self):
1425 # Note that initializations in the declarations are solely
1426 # to avoid 'uninitialized variable' errors from the compiler.
1427 # Declare memory data variable.
1428 if self.ctype in ['Twin32_t','Twin64_t']:
1429 return "%s %s; %s.a = 0; %s.b = 0;\n" % (self.ctype, self.base_name,
1430 self.base_name, self.base_name)
1431 c = '%s %s = 0;\n' % (self.ctype, self.base_name)
1432 return c
1433
1434 def makeRead(self):
1435 return ''
1436
1437 def makeWrite(self):
1438 return ''
1439
1440 # Return the memory access size *in bits*, suitable for
1441 # forming a type via "uint%d_t". Divide by 8 if you want bytes.
1442 def makeAccSize(self):
1443 return self.size
1444
1445
1446class NPCOperand(Operand):
1447 def makeConstructor(self):
1448 return ''
1449
1450 def makeRead(self):
1451 return '%s = xc->readNextPC();\n' % self.base_name
1452
1453 def makeWrite(self):
1454 return 'xc->setNextPC(%s);\n' % self.base_name
1455
1456class NNPCOperand(Operand):
1457 def makeConstructor(self):
1458 return ''
1459
1460 def makeRead(self):
1461 return '%s = xc->readNextNPC();\n' % self.base_name
1462
1463 def makeWrite(self):
1464 return 'xc->setNextNPC(%s);\n' % self.base_name
1465
1466def buildOperandNameMap(userDict, lineno):
1467 global operandNameMap
1468 operandNameMap = {}
1469 for (op_name, val) in userDict.iteritems():
1470 (base_cls_name, dflt_ext, reg_spec, flags, sort_pri) = val
1471 (dflt_size, dflt_ctype, dflt_is_signed) = operandTypeMap[dflt_ext]
1472 # Canonical flag structure is a triple of lists, where each list
1473 # indicates the set of flags implied by this operand always, when
1474 # used as a source, and when used as a dest, respectively.
1475 # For simplicity this can be initialized using a variety of fairly
1476 # obvious shortcuts; we convert these to canonical form here.
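# Illustrative shortcut conversions (flag names here are hypothetical examples,
# not taken from any particular ISA description):
#   'IsInteger'                       -> (['IsInteger'], [], [])
#   ['IsInteger', 'IsFloating']       -> (['IsInteger', 'IsFloating'], [], [])
#   ('IsMemRef', 'IsLoad', 'IsStore') -> (['IsMemRef'], ['IsLoad'], ['IsStore'])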
1477 if not flags:
1478 # no flags specified (e.g., 'None')
1479 flags = ( [], [], [] )
1480 elif isinstance(flags, str):
1481 # a single flag: assumed to be unconditional
1482 flags = ( [ flags ], [], [] )
1483 elif isinstance(flags, list):
1484 # a list of flags: also assumed to be unconditional
1485 flags = ( flags, [], [] )
1486 elif isinstance(flags, tuple):
1487 # it's a tuple: it should be a triple,
1488 # but each item could be a single string or a list
1489 (uncond_flags, src_flags, dest_flags) = flags
1490 flags = (makeList(uncond_flags),
1491 makeList(src_flags), makeList(dest_flags))
1492 # Accumulate attributes of new operand class in tmp_dict
1493 tmp_dict = {}
1494 for attr in ('dflt_ext', 'reg_spec', 'flags', 'sort_pri',
1495 'dflt_size', 'dflt_ctype', 'dflt_is_signed'):
1496 tmp_dict[attr] = eval(attr)
1497 tmp_dict['base_name'] = op_name
1498 # New class name will be e.g. "IntReg_Ra"
1499 cls_name = base_cls_name + '_' + op_name
1500 # Evaluate string arg to get class object. Note that the
1501 # actual base class for "IntReg" is "IntRegOperand", i.e. we
1502 # have to append "Operand".
1503 try:
1504 base_cls = eval(base_cls_name + 'Operand')
1505 except NameError:
1506 error(lineno,
1507 'error: unknown operand base class "%s"' % base_cls_name)
1508 # The following statement creates a new class called
1509 # <cls_name> as a subclass of <base_cls> with the attributes
1510 # in tmp_dict, just as if we evaluated a class declaration.
1511 operandNameMap[op_name] = type(cls_name, (base_cls,), tmp_dict)
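# For example (operand name 'Ra' and base class 'IntReg' are hypothetical),
# the statement above behaves as if we had written:
#   class IntReg_Ra(IntRegOperand):
#       base_name = 'Ra'
#       ...                # plus the other attributes collected in tmp_dict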
1512
1513 # Define operand variables.
1514 operands = userDict.keys()
1515
1516 operandsREString = (r'''
1517 (?<![\w\.]) # neg. lookbehind assertion: prevent partial matches
1518 ((%s)(?:\.(\w+))?) # match: operand with optional '.' then suffix
1519 (?![\w\.]) # neg. lookahead assertion: prevent partial matches
1520 '''
1521 % string.join(operands, '|'))
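# Illustrative matches (assuming hypothetical operands 'Ra' and 'Fa' are
# defined): in the fragment "Ra = Fa.sf + 1;" the first match yields groups
# ('Ra', 'Ra', None) and the second yields ('Fa.sf', 'Fa', 'sf').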
1522
1523 global operandsRE
1524 operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
1525
1526 # Same as operandsREString, but extension is mandatory, and only two
1527 # groups are returned (base and ext, not full name as above).
1528 # Used when munging operand names (stripping the '.ext' suffix) to make
# legal C++ identifiers.
1529 operandsWithExtREString = (r'(?<![\w\.])(%s)\.(\w+)(?![\w\.])'
1530 % string.join(operands, '|'))
1531
1532 global operandsWithExtRE
1533 operandsWithExtRE = re.compile(operandsWithExtREString, re.MULTILINE)
1534
1535
1536class OperandList:
1537
1538 # Find all the operands in the given code block. Returns an operand
1539 # descriptor list (instance of class OperandList).
1540 def __init__(self, code):
1541 self.items = []
1542 self.bases = {}
1543 # delete comments so we don't match on reg specifiers inside
1544 code = commentRE.sub('', code)
1545 # search for operands
1546 next_pos = 0
1547 while 1:
1548 match = operandsRE.search(code, next_pos)
1549 if not match:
1550 # no more matches: we're done
1551 break
1552 op = match.groups()
1553 # regexp groups are operand full name, base, and extension
1554 (op_full, op_base, op_ext) = op
1555 # if the token following the operand is an assignment, this is
1556 # a destination (LHS), else it's a source (RHS)
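# For example (hypothetical operand names): in "Rd = Ra + Rb;" the
# occurrence of Rd is followed by '=' and is treated as a destination,
# while Ra and Rb are treated as sources.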
1557 is_dest = (assignRE.match(code, match.end()) is not None)
1558 is_src = not is_dest
1559 # see if we've already seen this one
1560 op_desc = self.find_base(op_base)
1561 if op_desc:
1562 if op_desc.ext != op_ext:
1563 error(0, 'Inconsistent extensions for operand %s' % \
1564 op_base)
1565 op_desc.is_src = op_desc.is_src or is_src
1566 op_desc.is_dest = op_desc.is_dest or is_dest
1567 else:
1568 # new operand: create new descriptor
1569 op_desc = operandNameMap[op_base](op_full, op_ext,
1570 is_src, is_dest)
1571 self.append(op_desc)
1572 # start next search after end of current match
1573 next_pos = match.end()
1574 self.sort()
1575 # enumerate source & dest register operands... used in building
1576 # constructor later
1577 self.numSrcRegs = 0
1578 self.numDestRegs = 0
1579 self.numFPDestRegs = 0
1580 self.numIntDestRegs = 0
1581 self.memOperand = None
1582 for op_desc in self.items:
1583 if op_desc.isReg():
1584 if op_desc.is_src:
1585 op_desc.src_reg_idx = self.numSrcRegs
1586 self.numSrcRegs += 1
1587 if op_desc.is_dest:
1588 op_desc.dest_reg_idx = self.numDestRegs
1589 self.numDestRegs += 1
1590 if op_desc.isFloatReg():
1591 self.numFPDestRegs += 1
1592 elif op_desc.isIntReg():
1593 self.numIntDestRegs += 1
1594 elif op_desc.isMem():
1595 if self.memOperand:
1596 error(0, "Code block has more than one memory operand.")
1597 self.memOperand = op_desc
1598 # now make a final pass to finalize op_desc fields that may depend
1599 # on the register enumeration
1600 for op_desc in self.items:
1601 op_desc.finalize()
1602
1603 def __len__(self):
1604 return len(self.items)
1605
1606 def __getitem__(self, index):
1607 return self.items[index]
1608
1609 def append(self, op_desc):
1610 self.items.append(op_desc)
1611 self.bases[op_desc.base_name] = op_desc
1612
1613 def find_base(self, base_name):
1614 # like self.bases[base_name], but returns None if not found
1615 # (rather than raising exception)
1616 return self.bases.get(base_name)
1617
1618 # internal helper function for concat[Some]Attr{Strings|Lists}
1619 def __internalConcatAttrs(self, attr_name, filter, result):
1620 for op_desc in self.items:
1621 if filter(op_desc):
1622 result += getattr(op_desc, attr_name)
1623 return result
1624
1625 # return a single string that is the concatenation of the (string)
1626 # values of the specified attribute for all operands
1627 def concatAttrStrings(self, attr_name):
1628 return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
1629
1630 # like concatAttrStrings, but only include the values for the operands
1631 # for which the provided filter function returns true
1632 def concatSomeAttrStrings(self, filter, attr_name):
1633 return self.__internalConcatAttrs(attr_name, filter, '')
1634
1635 # return a single list that is the concatenation of the (list)
1636 # values of the specified attribute for all operands
1637 def concatAttrLists(self, attr_name):
1638 return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
1639
1640 # like concatAttrLists, but only include the values for the operands
1641 # for which the provided filter function returns true
1642 def concatSomeAttrLists(self, filter, attr_name):
1643 return self.__internalConcatAttrs(attr_name, filter, [])
1644
1645 def sort(self):
1646 self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
1647
1648class SubOperandList(OperandList):
1649
1650 # Find all the operands in the given code block and build a sub-list
1651 # of descriptors that refer back to the entries in the master OperandList.
1652 def __init__(self, code, master_list):
1653 self.items = []
1654 self.bases = {}
1655 # delete comments so we don't match on reg specifiers inside
1656 code = commentRE.sub('', code)
1657 # search for operands
1658 next_pos = 0
1659 while 1:
1660 match = operandsRE.search(code, next_pos)
1661 if not match:
1662 # no more matches: we're done
1663 break
1664 op = match.groups()
1665 # regexp groups are operand full name, base, and extension
1666 (op_full, op_base, op_ext) = op
1667 # find this op in the master list
1668 op_desc = master_list.find_base(op_base)
1669 if not op_desc:
1670 error(0, 'Found operand %s which is not in the master list!' \
1671 ' This is an internal error' % \
1672 op_base)
1673 else:
1674 # See if we've already found this operand
1675 op_desc = self.find_base(op_base)
1676 if not op_desc:
1677 # if not, add a reference to it to this sub list
1678 self.append(master_list.bases[op_base])
1679
1680 # start next search after end of current match
1681 next_pos = match.end()
1682 self.sort()
1683 self.memOperand = None
1684 for op_desc in self.items:
1685 if op_desc.isMem():
1686 if self.memOperand:
1687 error(0, "Code block has more than one memory operand.")
1688 self.memOperand = op_desc
1689
1690# Regular expression object to match C++ line ('//') comments
1691# (used when scanning code for operands in OperandList/SubOperandList)
1692commentRE = re.compile(r'//.*\n')
1693
1694# Regular expression object to match an assignment ('=' but not '==')
1695# immediately following an operand (used when scanning code for operands)
1696assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)
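# For example, the text following 'Rd' in "Rd = Ra + Rb;" matches (simple
# assignment), whereas "Ra == 0" does not; note that compound assignments
# such as "Rd += 1" are not recognized as assignments by this pattern.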
1697
1698# Munge operand names in code string to make legal C++ variable names.
1699# This means getting rid of the type extension if any.
1700# (Will match base_name attribute of Operand object.)
1701def substMungedOpNames(code):
1702 return operandsWithExtRE.sub(r'\1', code)
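# For example (hypothetical operand names): "Ra.sw = Rb.sw + 4;" would be
# rewritten as "Ra = Rb + 4;".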
1703
1704# Fix up code snippets for final substitution in templates.
1705def mungeSnippet(s):
1706 if isinstance(s, str):
1707 return substMungedOpNames(substBitOps(s))
1708 else:
1709 return s
1710
1711def makeFlagConstructor(flag_list):
1712 if len(flag_list) == 0:
1713 return ''
1714 # filter out repeated flags
1715 flag_list.sort()
1716 i = 1
1717 while i < len(flag_list):
1718 if flag_list[i] == flag_list[i-1]:
1719 del flag_list[i]
1720 else:
1721 i += 1
1722 pre = '\n\tflags['
1723 post = '] = true;'
1724 code = pre + string.join(flag_list, post + pre) + post
1725 return code
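# Illustrative output (hypothetical flag names): makeFlagConstructor(
# ['IsInteger', 'IsLoad']) returns C++ along the lines of:
#   flags[IsInteger] = true;
#   flags[IsLoad] = true;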
1726
1727# Assume all instruction flags are of the form 'IsFoo'
1728instFlagRE = re.compile(r'Is.*')
1729
1730# OpClass constants end in 'Op' except No_OpClass
1731opClassRE = re.compile(r'.*Op|No_OpClass')
1732
1733class InstObjParams:
1734 def __init__(self, mnem, class_name, base_class = '',
1735 snippets = {}, opt_args = []):
1736 self.mnemonic = mnem
1737 self.class_name = class_name
1738 self.base_class = base_class
1739 if not isinstance(snippets, dict):
1740 snippets = {'code' : snippets}
1741 compositeCode = ' '.join(map(str, snippets.values()))
1742 self.snippets = snippets
1743
1744 self.operands = OperandList(compositeCode)
1745 self.constructor = self.operands.concatAttrStrings('constructor')
1746 self.constructor += \
1747 '\n\t_numSrcRegs = %d;' % self.operands.numSrcRegs
1748 self.constructor += \
1749 '\n\t_numDestRegs = %d;' % self.operands.numDestRegs
1750 self.constructor += \
1751 '\n\t_numFPDestRegs = %d;' % self.operands.numFPDestRegs
1752 self.constructor += \
1753 '\n\t_numIntDestRegs = %d;' % self.operands.numIntDestRegs
1754 self.flags = self.operands.concatAttrLists('flags')
1755
1756 # Make a basic guess on the operand class (function unit type).
1757 # These are good enough for most cases, and can be overridden
1758 # later (e.g., via the optional opt_args below) if needed.
1759 if 'IsStore' in self.flags:
1760 self.op_class = 'MemWriteOp'
1761 elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
1762 self.op_class = 'MemReadOp'
1763 elif 'IsFloating' in self.flags:
1764 self.op_class = 'FloatAddOp'
1765 else:
1766 self.op_class = 'IntAluOp'
1767
1768 # Optional arguments are assumed to be either StaticInst flags
1769 # or an OpClass value. To avoid having to import a complete
1770 # list of these values to match against, we do it ad-hoc
1771 # with regexps.
1772 for oa in opt_args:
1773 if instFlagRE.match(oa):
1774 self.flags.append(oa)
1775 elif opClassRE.match(oa):
1776 self.op_class = oa
1777 else:
1778 error(0, 'InstObjParams: optional arg "%s" not recognized '
1779 'as StaticInst::Flag or OpClass.' % oa)
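# For example (hypothetical values): opt_args = ['IsNonSpeculative', 'IntMultOp']
# would append 'IsNonSpeculative' to the flag list (it matches instFlagRE) and
# set op_class to 'IntMultOp' (it matches opClassRE).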
1780
1781 # add flag initialization to the constructor here to include
1782 # any flags added via opt_args
1783 self.constructor += makeFlagConstructor(self.flags)
1784
1785 # if 'IsFloating' is set, add call to the FP enable check
1786 # function (which should be provided by isa_desc via a declare)
1787 if 'IsFloating' in self.flags:
1788 self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1789 else:
1790 self.fp_enable_check = ''
1791
1792#######################
1793#
1794# Output file template
1795#
1796
1797file_template = '''
1798/*
1799 * DO NOT EDIT THIS FILE!!!
1800 *
1801 * It was automatically generated from the ISA description in %(filename)s
1802 */
1803
1804%(includes)s
1805
1806%(global_output)s
1807
1808namespace %(namespace)s {
1809
1810%(namespace_output)s
1811
1812} // namespace %(namespace)s
1813
1814%(decode_function)s
1815'''
1816
1817
1818# Update the output file only if the new contents are different from
1819# the current contents. Minimizes the files that need to be rebuilt
1820# after minor changes.
1821def update_if_needed(file, contents):
1822 update = False
1823 if os.access(file, os.R_OK):
1824 f = open(file, 'r')
1825 old_contents = f.read()
1826 f.close()
1827 if contents != old_contents:
1828 print 'Updating', file
1829 os.remove(file) # in case it's write-protected
1830 update = True
1831 else:
1832 print 'File', file, 'is unchanged'
1833 else:
1834 print 'Generating', file
1835 update = True
1836 if update:
1837 f = open(file, 'w')
1838 f.write(contents)
1839 f.close()
1840
1841# This regular expression matches '##include' directives
1842includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
1843 re.MULTILINE)
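# For example, a line such as ##include "decoder.isa" (filename hypothetical)
# matches with group 'filename' == 'decoder.isa'.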
1844
1845# Function to replace a matched '##include' directive with the
1846# contents of the specified file (with nested ##includes replaced
1847# recursively). 'matchobj' is an re match object (from a match of
1848# includeRE) and 'dirname' is the directory relative to which the file
1849# path should be resolved.
1850def replace_include(matchobj, dirname):
1851 fname = matchobj.group('filename')
1852 full_fname = os.path.normpath(os.path.join(dirname, fname))
1853 contents = '##newfile "%s"\n%s\n##endfile\n' % \
1854 (full_fname, read_and_flatten(full_fname))
1855 return contents
1856
1857# Read a file and recursively flatten nested '##include' files.
1858def read_and_flatten(filename):
1859 current_dir = os.path.dirname(filename)
1860 try:
1861 contents = open(filename).read()
1862 except IOError:
1863 error(0, 'Error including file "%s"' % filename)
1864 fileNameStack.push((filename, 0))
1865 # Find any includes and include them
1866 contents = includeRE.sub(lambda m: replace_include(m, current_dir),
1867 contents)
1868 fileNameStack.pop()
1869 return contents
1870
1871#
1872# Read in and parse the ISA description.
1873#
1874def parse_isa_desc(isa_desc_file, output_dir):
1875 # Read file and (recursively) all included files into a string.
1876 # PLY requires that the input be in a single string so we have to
1877 # do this up front.
1878 isa_desc = read_and_flatten(isa_desc_file)
1879
1880 # Initialize filename stack with outer file.
1881 fileNameStack.push((isa_desc_file, 0))
1882
1883 # Parse it.
1884 (isa_name, namespace, global_code, namespace_code) = yacc.parse(isa_desc)
1885
1886 # grab the last three path components of isa_desc_file to put in
1887 # the output
1888 filename = '/'.join(isa_desc_file.split('/')[-3:])
1889
1890 # generate decoder.hh
1891 includes = '#include "base/bitfield.hh" // for bitfield support'
1892 global_output = global_code.header_output
1893 namespace_output = namespace_code.header_output
1894 decode_function = ''
1895 update_if_needed(output_dir + '/decoder.hh', file_template % vars())
1896
1897 # generate decoder.cc
1898 includes = '#include "decoder.hh"'
1899 global_output = global_code.decoder_output
1900 namespace_output = namespace_code.decoder_output
1901 # namespace_output += namespace_code.decode_block
1902 decode_function = namespace_code.decode_block
1903 update_if_needed(output_dir + '/decoder.cc', file_template % vars())
1904
1905 # generate per-cpu exec files
1906 for cpu in cpu_models:
1907 includes = '#include "decoder.hh"\n'
1908 includes += cpu.includes
1909 global_output = global_code.exec_output[cpu.name]
1910 namespace_output = namespace_code.exec_output[cpu.name]
1911 decode_function = ''
1912 update_if_needed(output_dir + '/' + cpu.filename,
1913 file_template % vars())
1914
1915# global list of CpuModel objects (see cpu_models.py)
1916cpu_models = []
1917
1918# Called as script: get args from command line.
1919# Args are: <path to cpu_models.py> <isa desc file> <output dir> <cpu models>
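# A sketch of an invocation (all paths and model names below are hypothetical):
#   python isa_parser.py cpu_models.py arch/foo/isa/main.isa build/foo/decoder \
#       AtomicSimpleCPU TimingSimpleCPU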
1920if __name__ == '__main__':
1921 execfile(sys.argv[1]) # read in CpuModel definitions
1922 cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
1923 parse_isa_desc(sys.argv[2], sys.argv[3])