# isa_parser.py revision 10326:43516d8eabe9
# Copyright (c) 2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2003-2005 The Regents of The University of Michigan
# Copyright (c) 2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt

from __future__ import with_statement
import os
import sys
import re
import string
import inspect, traceback
# get type names
from types import *

from m5.util.grammar import Grammar

debug=False

###################
# Utility functions

#
# Indent every line in string 's' by two spaces
# (except preprocessor directives).
# Used to make nested code blocks look pretty.
#
def indent(s):
    return re.sub(r'(?m)^(?!#)', '  ', s)

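# Illustrative example (not part of the original source; the directive and
# statements shown are arbitrary): indent() shifts nested code right by two
# spaces while leaving preprocessor directives at column zero.
if debug:
    print indent('int x;\n#if FULL_SYSTEM\nint y;')
    # expected output:
    #   int x;
    # #if FULL_SYSTEM
    #   int y;
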
#
# Munge a somewhat arbitrarily formatted piece of Python code
# (e.g. from a format 'let' block) into something whose indentation
# will be accepted by the Python parser.
#
# The two keys here are that Python will give a syntax error if
# there's any whitespace at the beginning of the first line, and that
# all lines at the same lexical nesting level must have identical
# indentation.  Unfortunately the way code literals work, an entire
# let block tends to have some initial indentation.  Rather than
# trying to figure out what that is and strip it off, we prepend 'if
# 1:' to make the let code the nested block inside the if (and have
# the parser automatically deal with the indentation for us).
#
# We don't want to do this if (1) the code block is empty or (2) the
# first line of the block doesn't have any whitespace at the front.

def fixPythonIndentation(s):
    # get rid of blank lines first
    s = re.sub(r'(?m)^\s*\n', '', s)
    if (s != '' and re.match(r'[ \t]', s[0])):
        s = 'if 1:\n' + s
    return s

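# Illustrative example (not part of the original source): a 'let' block that
# arrives with leading indentation gets wrapped in 'if 1:' so the Python
# parser accepts it unchanged.
if debug:
    print fixPythonIndentation('    x = 1\n    y = 2\n')
    # expected output:
    # if 1:
    #     x = 1
    #     y = 2
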
class ISAParserError(Exception):
    """Exception class for parser errors"""
    def __init__(self, first, second=None):
        if second is None:
            self.lineno = 0
            self.string = first
        else:
            if hasattr(first, 'lexer'):
                first = first.lexer.lineno
            self.lineno = first
            self.string = second

    def display(self, filename_stack, print_traceback=debug):
        # Output formatted to work under Emacs compile-mode.  Optional
        # 'print_traceback' arg, if set to True, prints a Python stack
        # backtrace too (can be handy when trying to debug the parser
        # itself).

        spaces = ""
        for (filename, line) in filename_stack[:-1]:
            print "%sIn file included from %s:" % (spaces, filename)
            spaces += "  "

        # Print a Python stack backtrace if requested.
        if print_traceback or not self.lineno:
            traceback.print_exc()

        line_str = "%s:" % (filename_stack[-1][0], )
        if self.lineno:
            line_str += "%d:" % (self.lineno, )

        return "%s%s %s" % (spaces, line_str, self.string)

    def exit(self, filename_stack, print_traceback=debug):
        # Just call exit.

        sys.exit(self.display(filename_stack, print_traceback))

def error(*args):
    raise ISAParserError(*args)

####################
# Template objects.
#
# Template objects are format strings that allow substitution from
# the attribute spaces of other objects (e.g. InstObjParams instances).

labelRE = re.compile(r'(?<!%)%\(([^\)]+)\)[sd]')

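# Illustrative example (not part of the original source): labelRE picks up
# python-style %(name)s / %(name)d placeholders but skips escaped '%%'
# sequences, which is what lets printf-style text survive in templates.
if debug:
    print labelRE.findall('%(class_name)s: %(op_rd)s done 100%%(pct)s')
    # expected output: ['class_name', 'op_rd']
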
class Template(object):
    def __init__(self, parser, t):
        self.parser = parser
        self.template = t

    def subst(self, d):
        myDict = None

        # Protect non-Python-dict substitutions (e.g. if there's a printf
        # in the templated C++ code)
        template = self.parser.protectNonSubstPercents(self.template)
        # CPU-model-specific substitutions are handled later (in GenCode).
        template = self.parser.protectCpuSymbols(template)

        # Build a dict ('myDict') to use for the template substitution.
        # Start with the template namespace.  Make a copy since we're
        # going to modify it.
        myDict = self.parser.templateMap.copy()

        if isinstance(d, InstObjParams):
            # If we're dealing with an InstObjParams object, we need
            # to be a little more sophisticated.  The instruction-wide
            # parameters are already formed, but the parameters which
            # are only function wide still need to be generated.
            compositeCode = ''

            myDict.update(d.__dict__)
            # The "operands" and "snippets" attributes of the InstObjParams
            # objects are for internal use and not substitution.
            del myDict['operands']
            del myDict['snippets']

            snippetLabels = [l for l in labelRE.findall(template)
                             if d.snippets.has_key(l)]

            snippets = dict([(s, self.parser.mungeSnippet(d.snippets[s]))
                             for s in snippetLabels])

            myDict.update(snippets)

            compositeCode = ' '.join(map(str, snippets.values()))

            # Add in template itself in case it references any
            # operands explicitly (like Mem)
            compositeCode += ' ' + template

            operands = SubOperandList(self.parser, compositeCode, d.operands)

            myDict['op_decl'] = operands.concatAttrStrings('op_decl')
            if operands.readPC or operands.setPC:
                myDict['op_decl'] += 'TheISA::PCState __parserAutoPCState;\n'

            # In case there are predicated register reads and writes, declare
            # the variables for register indices. It is assumed that
            # all the operands in the OperandList are also in the
            # SubOperandList and in the same order. Otherwise, it is
            # expected that predication would not be used for the operands.
            if operands.predRead:
                myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
            if operands.predWrite:
                myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'

            is_src = lambda op: op.is_src
            is_dest = lambda op: op.is_dest

            myDict['op_src_decl'] = \
                      operands.concatSomeAttrStrings(is_src, 'op_src_decl')
            myDict['op_dest_decl'] = \
                      operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
            if operands.readPC:
                myDict['op_src_decl'] += \
                    'TheISA::PCState __parserAutoPCState;\n'
            if operands.setPC:
                myDict['op_dest_decl'] += \
                    'TheISA::PCState __parserAutoPCState;\n'

            myDict['op_rd'] = operands.concatAttrStrings('op_rd')
            if operands.readPC:
                myDict['op_rd'] = '__parserAutoPCState = xc->pcState();\n' + \
                                  myDict['op_rd']

            # Compose the op_wb string. If we're going to write back the
            # PC state because we changed some of its elements, we'll need to
            # do that as early as possible. That allows later uncoordinated
            # modifications to the PC to layer appropriately.
            reordered = list(operands.items)
            reordered.reverse()
            op_wb_str = ''
            pcWbStr = 'xc->pcState(__parserAutoPCState);\n'
            for op_desc in reordered:
                if op_desc.isPCPart() and op_desc.is_dest:
                    op_wb_str = op_desc.op_wb + pcWbStr + op_wb_str
                    pcWbStr = ''
                else:
                    op_wb_str = op_desc.op_wb + op_wb_str
            myDict['op_wb'] = op_wb_str

        elif isinstance(d, dict):
            # if the argument is a dictionary, we just use it.
            myDict.update(d)
        elif hasattr(d, '__dict__'):
            # if the argument is an object, we use its attribute map.
            myDict.update(d.__dict__)
        else:
            raise TypeError, "Template.subst() arg must be or have dictionary"
        return template % myDict

    # Convert to string.  This handles the case when a template with a
    # CPU-specific term gets interpolated into another template or into
    # an output block.
    def __str__(self):
        return self.parser.expandCpuSymbolsToString(self.template)

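# Illustrative sketch (not part of the original source; the template and
# snippet names here are hypothetical): a Template is normally registered in
# parser.templateMap by a 'def template' block and then substituted with an
# InstObjParams object, e.g.
#
#   iop = InstObjParams(parser, 'add', 'Add', 'BasicOp',
#                       {'code': 'Rd = Rs1 + Rs2;'})
#   exec_code = parser.templateMap['BasicExecute'].subst(iop)
#
# subst() fills the %(...)s labels from the InstObjParams attribute space
# plus any code snippets named in the template.
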
################
# Format object.
#
# A format object encapsulates an instruction format.  It must provide
# a defineInst() method that generates the code for an instruction
# definition.

class Format(object):
    def __init__(self, id, params, code):
        self.id = id
        self.params = params
        label = 'def format ' + id
        self.user_code = compile(fixPythonIndentation(code), label, 'exec')
        param_list = string.join(params, ", ")
        f = '''def defInst(_code, _context, %s):
                my_locals = vars().copy()
                exec _code in _context, my_locals
                return my_locals\n''' % param_list
        c = compile(f, label + ' wrapper', 'exec')
        exec c
        self.func = defInst

    def defineInst(self, parser, name, args, lineno):
        parser.updateExportContext()
        context = parser.exportContext.copy()
        if len(name):
            Name = name[0].upper()
            if len(name) > 1:
                Name += name[1:]
        context.update({ 'name' : name, 'Name' : Name })
        try:
            vars = self.func(self.user_code, context, *args[0], **args[1])
        except Exception, exc:
            if debug:
                raise
            error(lineno, 'error defining "%s": %s.' % (name, exc))
        for k in vars.keys():
            if k not in ('header_output', 'decoder_output',
                         'exec_output', 'decode_block'):
                del vars[k]
        return GenCode(parser, **vars)

# Special null format to catch an implicit-format instruction
# definition outside of any format block.
class NoFormat(object):
    def __init__(self):
        self.defaultInst = ''

    def defineInst(self, parser, name, args, lineno):
        error(lineno,
              'instruction definition "%s" with no active format!' % name)

###############
# GenCode class
#
# The GenCode class encapsulates generated code destined for various
# output files.  The header_output and decoder_output attributes are
# strings containing code destined for decoder.hh and decoder.cc
# respectively.  The decode_block attribute contains code to be
# incorporated in the decode function itself (that will also end up in
# decoder.cc).  The exec_output attribute is a string of code destined
# for the exec .cc file(s).  The has_decode_default attribute is used
# in the decode block to allow explicit default clauses to override
# default default clauses.

class GenCode(object):
    # Constructor.  At this point we substitute out all CPU-specific
    # symbols in the header and decoder output; the exec output is
    # stored as-is, since it is handled per CPU model when the output
    # files are written.
    def __init__(self, parser,
                 header_output = '', decoder_output = '', exec_output = '',
                 decode_block = '', has_decode_default = False):
        self.parser = parser
        self.header_output = parser.expandCpuSymbolsToString(header_output)
        self.decoder_output = parser.expandCpuSymbolsToString(decoder_output)
        self.exec_output = exec_output
        self.decode_block = decode_block
        self.has_decode_default = has_decode_default

    # Write these code chunks out to the filesystem.  They will be properly
    # interwoven by write_top_level_files().
    def emit(self):
        if self.header_output:
            self.parser.get_file('header').write(self.header_output)
        if self.decoder_output:
            self.parser.get_file('decoder').write(self.decoder_output)
        if self.exec_output:
            self.parser.get_file('exec').write(self.exec_output)
        if self.decode_block:
            self.parser.get_file('decode_block').write(self.decode_block)

    # Override '+' operator: generate a new GenCode object that
    # concatenates all the individual strings in the operands.
    def __add__(self, other):
        return GenCode(self.parser,
                       self.header_output + other.header_output,
                       self.decoder_output + other.decoder_output,
                       self.exec_output + other.exec_output,
                       self.decode_block + other.decode_block,
                       self.has_decode_default or other.has_decode_default)

    # Prepend a string (typically a comment) to all the strings.
    def prepend_all(self, pre):
        self.header_output = pre + self.header_output
        self.decoder_output  = pre + self.decoder_output
        self.decode_block = pre + self.decode_block
        self.exec_output  = pre + self.exec_output

    # Wrap the decode block in a pair of strings (e.g., 'case foo:'
    # and 'break;').  Used to build the big nested switch statement.
    def wrap_decode_block(self, pre, post = ''):
        self.decode_block = pre + indent(self.decode_block) + post

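# Illustrative sketch (not part of the original source; the case value is
# hypothetical): the decode-tree builder wraps each alternative's GenCode
# roughly like
#
#   code.wrap_decode_block('case 0x1:\n', 'break;\n')
#
# so that decode_block becomes the indented original block surrounded by
# 'case 0x1:' and 'break;', ready to drop into the big switch statement.
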
#####################################################################
#
#                      Bitfield Operator Support
#
#####################################################################

bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')

bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')

def substBitOps(code):
    # first convert single-bit selectors to two-index form
    # i.e., <n> --> <n:n>
    code = bitOp1ArgRE.sub(r'<\1:\1>', code)
    # simple case: selector applied to ID (name)
    # i.e., foo<a:b> --> bits(foo, a, b)
    code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
    # if selector is applied to expression (ending in ')'),
    # we need to search backward for matching '('
    match = bitOpExprRE.search(code)
    while match:
        exprEnd = match.start()
        here = exprEnd - 1
        nestLevel = 1
        while nestLevel > 0:
            if code[here] == '(':
                nestLevel -= 1
            elif code[here] == ')':
                nestLevel += 1
            here -= 1
            if here < 0:
                sys.exit("Didn't find '('!")
        exprStart = here+1
        newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
                                         match.group(1), match.group(2))
        code = code[:exprStart] + newExpr + code[match.end():]
        match = bitOpExprRE.search(code)
    return code

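# Illustrative example (not part of the original source; Rd/Rs1/Rs2/imm are
# arbitrary operand names): both the simple name<hi:lo> form and the
# parenthesized-expression form are rewritten into bits() calls.
if debug:
    print substBitOps('Rd = Rs1<31:0> + (Rs2 + imm)<7:0>;')
    # expected output:
    # Rd = bits(Rs1, 31, 0) + bits((Rs2 + imm), 7, 0);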

#####################################################################
#
#                             Code Parser
#
# The remaining code is the support for automatically extracting
# instruction characteristics from pseudocode.
#
#####################################################################

# Force the argument to be a list.  Useful for flags, where a caller
# can specify a singleton flag or a list of flags.  Also useful for
# converting tuples to lists so they can be modified.
def makeList(arg):
    if isinstance(arg, list):
        return arg
    elif isinstance(arg, tuple):
        return list(arg)
    elif not arg:
        return []
    else:
        return [ arg ]

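# Illustrative example (not part of the original source): singletons,
# tuples and None/empty values all come back as plain lists.
if debug:
    print makeList('IsLoad')                 # ['IsLoad']
    print makeList(('IsLoad', 'IsMemRef'))   # ['IsLoad', 'IsMemRef']
    print makeList(None)                     # []
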
class Operand(object):
    '''Base class for operand descriptors.  An instance of this class
    (or actually a class derived from this one) represents a specific
    operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
    derived classes encapsulate the traits of a particular operand
    type (e.g., "32-bit integer register").'''

    def buildReadCode(self, func = None):
        subst_dict = {"name": self.base_name,
                      "func": func,
                      "reg_idx": self.reg_spec,
                      "ctype": self.ctype}
        if hasattr(self, 'src_reg_idx'):
            subst_dict['op_idx'] = self.src_reg_idx
        code = self.read_code % subst_dict
        return '%s = %s;\n' % (self.base_name, code)

    def buildWriteCode(self, func = None):
        subst_dict = {"name": self.base_name,
                      "func": func,
                      "reg_idx": self.reg_spec,
                      "ctype": self.ctype,
                      "final_val": self.base_name}
        if hasattr(self, 'dest_reg_idx'):
            subst_dict['op_idx'] = self.dest_reg_idx
        code = self.write_code % subst_dict
        return '''
        {
            %s final_val = %s;
            %s;
            if (traceData) { traceData->setData(final_val); }
        }''' % (self.dflt_ctype, self.base_name, code)

    def __init__(self, parser, full_name, ext, is_src, is_dest):
        self.full_name = full_name
        self.ext = ext
        self.is_src = is_src
        self.is_dest = is_dest
        # The 'effective extension' (eff_ext) is either the actual
        # extension, if one was explicitly provided, or the default.
        if ext:
            self.eff_ext = ext
        elif hasattr(self, 'dflt_ext'):
            self.eff_ext = self.dflt_ext

        if hasattr(self, 'eff_ext'):
            self.ctype = parser.operandTypeMap[self.eff_ext]

    # Finalize additional fields (primarily code fields).  This step
    # is done separately since some of these fields may depend on the
    # register index enumeration that hasn't been performed yet at the
    # time of __init__(). The register index enumeration is affected
    # by predicated register reads/writes. Hence, we forward the flags
    # that indicate whether or not predication is in use.
    def finalize(self, predRead, predWrite):
        self.flags = self.getFlags()
        self.constructor = self.makeConstructor(predRead, predWrite)
        self.op_decl = self.makeDecl()

        if self.is_src:
            self.op_rd = self.makeRead(predRead)
            self.op_src_decl = self.makeDecl()
        else:
            self.op_rd = ''
            self.op_src_decl = ''

        if self.is_dest:
            self.op_wb = self.makeWrite(predWrite)
            self.op_dest_decl = self.makeDecl()
        else:
            self.op_wb = ''
            self.op_dest_decl = ''

    def isMem(self):
        return 0

    def isReg(self):
        return 0

    def isFloatReg(self):
        return 0

    def isIntReg(self):
        return 0

    def isCCReg(self):
        return 0

    def isControlReg(self):
        return 0

    def isPCState(self):
        return 0

    def isPCPart(self):
        return self.isPCState() and self.reg_spec

    def hasReadPred(self):
        return self.read_predicate != None

    def hasWritePred(self):
        return self.write_predicate != None

    def getFlags(self):
        # note the empty slice '[:]' gives us a copy of self.flags[0]
        # instead of a reference to it
        my_flags = self.flags[0][:]
        if self.is_src:
            my_flags += self.flags[1]
        if self.is_dest:
            my_flags += self.flags[2]
        return my_flags

    def makeDecl(self):
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        return self.ctype + ' ' + self.base_name + ' = 0;\n'

class IntRegOperand(Operand):
    def isReg(self):
        return 1

    def isIntReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s;' % (self.reg_spec)
            if self.hasReadPred():
                c_src = '\n\tif (%s) {%s\n\t}' % \
                        (self.read_predicate, c_src)

        if self.is_dest:
            c_dest = '\n\t_destRegIdx[_numDestRegs++] = %s;' % \
                    (self.reg_spec)
            c_dest += '\n\t_numIntDestRegs++;'
            if self.hasWritePred():
                c_dest = '\n\tif (%s) {%s\n\t}' % \
                         (self.write_predicate, c_dest)

        return c_src + c_dest

    def makeRead(self, predRead):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read integer register as FP')
        if self.read_code != None:
            return self.buildReadCode('readIntRegOperand')

        int_reg_val = ''
        if predRead:
            int_reg_val = 'xc->readIntRegOperand(this, _sourceIndex++)'
            if self.hasReadPred():
                int_reg_val = '(%s) ? %s : 0' % \
                              (self.read_predicate, int_reg_val)
        else:
            int_reg_val = 'xc->readIntRegOperand(this, %d)' % self.src_reg_idx

        return '%s = %s;\n' % (self.base_name, int_reg_val)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write integer register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setIntRegOperand')

        if predWrite:
            wp = 'true'
            if self.hasWritePred():
                wp = self.write_predicate

            wcond = 'if (%s)' % (wp)
            windex = '_destIndex++'
        else:
            wcond = ''
            windex = '%d' % self.dest_reg_idx

        wb = '''
        %s
        {
            %s final_val = %s;
            xc->setIntRegOperand(this, %s, final_val);\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (wcond, self.ctype, self.base_name, windex)

        return wb

class FloatRegOperand(Operand):
    def isReg(self):
        return 1

    def isFloatReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s + FP_Reg_Base;' % \
                    (self.reg_spec)

        if self.is_dest:
            c_dest = \
              '\n\t_destRegIdx[_numDestRegs++] = %s + FP_Reg_Base;' % \
              (self.reg_spec)
            c_dest += '\n\t_numFPDestRegs++;'

        return c_src + c_dest

    def makeRead(self, predRead):
        bit_select = 0
        if (self.ctype == 'float' or self.ctype == 'double'):
            func = 'readFloatRegOperand'
        else:
            func = 'readFloatRegOperandBits'
        if self.read_code != None:
            return self.buildReadCode(func)

        if predRead:
            rindex = '_sourceIndex++'
        else:
            rindex = '%d' % self.src_reg_idx

        return '%s = xc->%s(this, %s);\n' % \
            (self.base_name, func, rindex)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            func = 'setFloatRegOperand'
        else:
            func = 'setFloatRegOperandBits'
        if self.write_code != None:
            return self.buildWriteCode(func)

        if predWrite:
            wp = '_destIndex++'
        else:
            wp = '%d' % self.dest_reg_idx
        wp = 'xc->%s(this, %s, final_val);' % (func, wp)

        wb = '''
        {
            %s final_val = %s;
            %s\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (self.ctype, self.base_name, wp)
        return wb

class CCRegOperand(Operand):
    def isReg(self):
        return 1

    def isCCReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s + CC_Reg_Base;' % \
                     (self.reg_spec)
            if self.hasReadPred():
                c_src = '\n\tif (%s) {%s\n\t}' % \
                        (self.read_predicate, c_src)

        if self.is_dest:
            c_dest = \
              '\n\t_destRegIdx[_numDestRegs++] = %s + CC_Reg_Base;' % \
              (self.reg_spec)
            c_dest += '\n\t_numCCDestRegs++;'
            if self.hasWritePred():
                c_dest = '\n\tif (%s) {%s\n\t}' % \
                         (self.write_predicate, c_dest)

        return c_src + c_dest

    def makeRead(self, predRead):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read condition-code register as FP')
        if self.read_code != None:
            return self.buildReadCode('readCCRegOperand')

        int_reg_val = ''
        if predRead:
            int_reg_val = 'xc->readCCRegOperand(this, _sourceIndex++)'
            if self.hasReadPred():
                int_reg_val = '(%s) ? %s : 0' % \
                              (self.read_predicate, int_reg_val)
        else:
            int_reg_val = 'xc->readCCRegOperand(this, %d)' % self.src_reg_idx

        return '%s = %s;\n' % (self.base_name, int_reg_val)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write condition-code register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setCCRegOperand')

        if predWrite:
            wp = 'true'
            if self.hasWritePred():
                wp = self.write_predicate

            wcond = 'if (%s)' % (wp)
            windex = '_destIndex++'
        else:
            wcond = ''
            windex = '%d' % self.dest_reg_idx

        wb = '''
        %s
        {
            %s final_val = %s;
            xc->setCCRegOperand(this, %s, final_val);\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (wcond, self.ctype, self.base_name, windex)

        return wb

class ControlRegOperand(Operand):
    def isReg(self):
        return 1

    def isControlReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = \
              '\n\t_srcRegIdx[_numSrcRegs++] = %s + Misc_Reg_Base;' % \
              (self.reg_spec)

        if self.is_dest:
            c_dest = \
              '\n\t_destRegIdx[_numDestRegs++] = %s + Misc_Reg_Base;' % \
              (self.reg_spec)

        return c_src + c_dest

    def makeRead(self, predRead):
        bit_select = 0
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read control register as FP')
        if self.read_code != None:
            return self.buildReadCode('readMiscRegOperand')

        if predRead:
            rindex = '_sourceIndex++'
        else:
            rindex = '%d' % self.src_reg_idx

        return '%s = xc->readMiscRegOperand(this, %s);\n' % \
            (self.base_name, rindex)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write control register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setMiscRegOperand')

        if predWrite:
            windex = '_destIndex++'
        else:
            windex = '%d' % self.dest_reg_idx

        wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
             (windex, self.base_name)
        wb += 'if (traceData) { traceData->setData(%s); }' % \
              self.base_name

        return wb

class MemOperand(Operand):
    def isMem(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        return ''

    def makeDecl(self):
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        # Declare memory data variable.
        return '%s %s = 0;\n' % (self.ctype, self.base_name)

    def makeRead(self, predRead):
        if self.read_code != None:
            return self.buildReadCode()
        return ''

    def makeWrite(self, predWrite):
        if self.write_code != None:
            return self.buildWriteCode()
        return ''

class PCStateOperand(Operand):
    def makeConstructor(self, predRead, predWrite):
        return ''

    def makeRead(self, predRead):
        if self.reg_spec:
            # A component of the PC state.
            return '%s = __parserAutoPCState.%s();\n' % \
                (self.base_name, self.reg_spec)
        else:
            # The whole PC state itself.
            return '%s = xc->pcState();\n' % self.base_name

    def makeWrite(self, predWrite):
        if self.reg_spec:
            # A component of the PC state.
            return '__parserAutoPCState.%s(%s);\n' % \
                (self.reg_spec, self.base_name)
        else:
            # The whole PC state itself.
            return 'xc->pcState(%s);\n' % self.base_name

    def makeDecl(self):
        ctype = 'TheISA::PCState'
        if self.isPCPart():
            ctype = self.ctype
        return "%s %s;\n" % (ctype, self.base_name)

    def isPCState(self):
        return 1

class OperandList(object):
    '''Find all the operands in the given code block.  Returns an operand
    descriptor list (instance of class OperandList).'''
    def __init__(self, parser, code):
        self.items = []
        self.bases = {}
        # delete strings and comments so we don't match on operands inside
        for regEx in (stringRE, commentRE):
            code = regEx.sub('', code)
        # search for operands
        next_pos = 0
        while 1:
            match = parser.operandsRE.search(code, next_pos)
            if not match:
                # no more matches: we're done
                break
            op = match.groups()
            # regexp groups are operand full name, base, and extension
            (op_full, op_base, op_ext) = op
            # if the token following the operand is an assignment, this is
            # a destination (LHS), else it's a source (RHS)
            is_dest = (assignRE.match(code, match.end()) != None)
            is_src = not is_dest
            # see if we've already seen this one
            op_desc = self.find_base(op_base)
            if op_desc:
                if op_desc.ext != op_ext:
                    error('Inconsistent extensions for operand %s' % \
                          op_base)
                op_desc.is_src = op_desc.is_src or is_src
                op_desc.is_dest = op_desc.is_dest or is_dest
            else:
                # new operand: create new descriptor
                op_desc = parser.operandNameMap[op_base](parser,
                    op_full, op_ext, is_src, is_dest)
                self.append(op_desc)
            # start next search after end of current match
            next_pos = match.end()
        self.sort()
        # enumerate source & dest register operands... used in building
        # constructor later
        self.numSrcRegs = 0
        self.numDestRegs = 0
        self.numFPDestRegs = 0
        self.numIntDestRegs = 0
        self.numCCDestRegs = 0
        self.numMiscDestRegs = 0
        self.memOperand = None

        # Flags to keep track if one or more operands are to be read/written
        # conditionally.
        self.predRead = False
        self.predWrite = False

        for op_desc in self.items:
            if op_desc.isReg():
                if op_desc.is_src:
                    op_desc.src_reg_idx = self.numSrcRegs
                    self.numSrcRegs += 1
                if op_desc.is_dest:
                    op_desc.dest_reg_idx = self.numDestRegs
                    self.numDestRegs += 1
                    if op_desc.isFloatReg():
                        self.numFPDestRegs += 1
                    elif op_desc.isIntReg():
                        self.numIntDestRegs += 1
                    elif op_desc.isCCReg():
                        self.numCCDestRegs += 1
                    elif op_desc.isControlReg():
                        self.numMiscDestRegs += 1
            elif op_desc.isMem():
                if self.memOperand:
                    error("Code block has more than one memory operand.")
                self.memOperand = op_desc

            # Check if this operand has read/write predication. If true, then
            # the microop will dynamically index source/dest registers.
            self.predRead = self.predRead or op_desc.hasReadPred()
            self.predWrite = self.predWrite or op_desc.hasWritePred()

        if parser.maxInstSrcRegs < self.numSrcRegs:
            parser.maxInstSrcRegs = self.numSrcRegs
        if parser.maxInstDestRegs < self.numDestRegs:
            parser.maxInstDestRegs = self.numDestRegs
        if parser.maxMiscDestRegs < self.numMiscDestRegs:
            parser.maxMiscDestRegs = self.numMiscDestRegs

        # now make a final pass to finalize op_desc fields that may depend
        # on the register enumeration
        for op_desc in self.items:
            op_desc.finalize(self.predRead, self.predWrite)

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]

    def append(self, op_desc):
        self.items.append(op_desc)
        self.bases[op_desc.base_name] = op_desc

    def find_base(self, base_name):
        # like self.bases[base_name], but returns None if not found
        # (rather than raising exception)
        return self.bases.get(base_name)

    # internal helper function for concat[Some]Attr{Strings|Lists}
    def __internalConcatAttrs(self, attr_name, filter, result):
        for op_desc in self.items:
            if filter(op_desc):
                result += getattr(op_desc, attr_name)
        return result

    # return a single string that is the concatenation of the (string)
    # values of the specified attribute for all operands
    def concatAttrStrings(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, '')

    # like concatAttrStrings, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrStrings(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, '')

    # return a single list that is the concatenation of the (list)
    # values of the specified attribute for all operands
    def concatAttrLists(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, [])

    # like concatAttrLists, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrLists(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, [])

    def sort(self):
        self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)

class SubOperandList(OperandList):
    '''Find all the operands in the given code block.  Returns an operand
    descriptor list (instance of class OperandList).'''
    def __init__(self, parser, code, master_list):
        self.items = []
        self.bases = {}
        # delete strings and comments so we don't match on operands inside
        for regEx in (stringRE, commentRE):
            code = regEx.sub('', code)
        # search for operands
        next_pos = 0
        while 1:
            match = parser.operandsRE.search(code, next_pos)
            if not match:
                # no more matches: we're done
                break
            op = match.groups()
            # regexp groups are operand full name, base, and extension
            (op_full, op_base, op_ext) = op
            # find this op in the master list
            op_desc = master_list.find_base(op_base)
            if not op_desc:
                error('Found operand %s which is not in the master list!' \
                      ' This is an internal error' % op_base)
            else:
                # See if we've already found this operand
                op_desc = self.find_base(op_base)
                if not op_desc:
                    # if not, add a reference to it to this sub list
                    self.append(master_list.bases[op_base])

            # start next search after end of current match
            next_pos = match.end()
        self.sort()
        self.memOperand = None
        # Whether the whole PC needs to be read so parts of it can be accessed
        self.readPC = False
        # Whether the whole PC needs to be written after parts of it were
        # changed
        self.setPC = False
        # Whether this instruction manipulates the whole PC or parts of it.
        # Mixing the two is a bad idea and flagged as an error.
        self.pcPart = None

        # Flags to keep track if one or more operands are to be read/written
        # conditionally.
        self.predRead = False
        self.predWrite = False

        for op_desc in self.items:
            if op_desc.isPCPart():
                self.readPC = True
                if op_desc.is_dest:
                    self.setPC = True

            if op_desc.isPCState():
                if self.pcPart is not None:
                    if self.pcPart and not op_desc.isPCPart() or \
                            not self.pcPart and op_desc.isPCPart():
                        error("Mixed whole and partial PC state operands.")
                self.pcPart = op_desc.isPCPart()

            if op_desc.isMem():
                if self.memOperand:
                    error("Code block has more than one memory operand.")
                self.memOperand = op_desc

            # Check if this operand has read/write predication. If true, then
            # the microop will dynamically index source/dest registers.
            self.predRead = self.predRead or op_desc.hasReadPred()
            self.predWrite = self.predWrite or op_desc.hasWritePred()

# Regular expression object to match C++ strings
stringRE = re.compile(r'"([^"\\]|\\.)*"')

# Regular expression object to match C++ comments
# (used in findOperands())
commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
        re.DOTALL | re.MULTILINE)

# Regular expression object to match assignment statements
# (used in findOperands())
assignRE = re.compile(r'\s*=(?!=)', re.MULTILINE)

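# Illustrative example (not part of the original source): an operand
# followed by a plain '=' is treated as a destination, while '==' (a
# comparison) is not; commentRE (and stringRE) strip text that should never
# be scanned for operands.
if debug:
    print assignRE.match(' = Rs1;') is not None     # True  -> destination
    print assignRE.match(' == Rs1') is not None     # False -> source only
    print commentRE.sub('', 'x = 1; // Mem is not an operand here')
    # expected output: x = 1;
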
def makeFlagConstructor(flag_list):
    if len(flag_list) == 0:
        return ''
    # filter out repeated flags
    flag_list.sort()
    i = 1
    while i < len(flag_list):
        if flag_list[i] == flag_list[i-1]:
            del flag_list[i]
        else:
            i += 1
    pre = '\n\tflags['
    post = '] = true;'
    code = pre + string.join(flag_list, post + pre) + post
    return code

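# Illustrative example (not part of the original source): duplicate flags
# are dropped and each remaining flag becomes one assignment in the
# generated constructor body.
if debug:
    print makeFlagConstructor(['IsLoad', 'IsMemRef', 'IsLoad'])
    # expected output (each line starts with a tab):
    #     flags[IsLoad] = true;
    #     flags[IsMemRef] = true;
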
# Assume all instruction flags are of the form 'IsFoo'
instFlagRE = re.compile(r'Is.*')

# OpClass constants end in 'Op' except No_OpClass
opClassRE = re.compile(r'.*Op|No_OpClass')

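# Illustrative example (not part of the original source): this is how the
# optional InstObjParams arguments get classified below.
if debug:
    print instFlagRE.match('IsMemRef') is not None   # True  -> StaticInst flag
    print opClassRE.match('MemReadOp') is not None   # True  -> OpClass value
    print opClassRE.match('IsMemRef') is not None    # False
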
class InstObjParams(object):
    def __init__(self, parser, mnem, class_name, base_class = '',
                 snippets = {}, opt_args = []):
        self.mnemonic = mnem
        self.class_name = class_name
        self.base_class = base_class
        if not isinstance(snippets, dict):
            snippets = {'code' : snippets}
        compositeCode = ' '.join(map(str, snippets.values()))
        self.snippets = snippets

        self.operands = OperandList(parser, compositeCode)

        # The header of the constructor declares the variables to be used
        # in the body of the constructor.
        header = ''
        header += '\n\t_numSrcRegs = 0;'
        header += '\n\t_numDestRegs = 0;'
        header += '\n\t_numFPDestRegs = 0;'
        header += '\n\t_numIntDestRegs = 0;'
        header += '\n\t_numCCDestRegs = 0;'

        self.constructor = header + \
                           self.operands.concatAttrStrings('constructor')

        self.flags = self.operands.concatAttrLists('flags')

        self.op_class = None

        # Optional arguments are assumed to be either StaticInst flags
        # or an OpClass value.  To avoid having to import a complete
        # list of these values to match against, we do it ad-hoc
        # with regexps.
        for oa in opt_args:
            if instFlagRE.match(oa):
                self.flags.append(oa)
            elif opClassRE.match(oa):
                self.op_class = oa
            else:
                error('InstObjParams: optional arg "%s" not recognized '
                      'as StaticInst::Flag or OpClass.' % oa)

        # Make a basic guess on the operand class if not set.
        # These are good enough for most cases.
        if not self.op_class:
            if 'IsStore' in self.flags:
                self.op_class = 'MemWriteOp'
            elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
                self.op_class = 'MemReadOp'
            elif 'IsFloating' in self.flags:
                self.op_class = 'FloatAddOp'
            else:
                self.op_class = 'IntAluOp'

        # add flag initialization to constructor here to include
        # any flags added via opt_args
        self.constructor += makeFlagConstructor(self.flags)

        # if 'IsFloating' is set, add call to the FP enable check
        # function (which should be provided by isa_desc via a declare)
        if 'IsFloating' in self.flags:
            self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
        else:
            self.fp_enable_check = ''

##############
# Stack: a simple stack object.  Used for both formats (formatStack)
# and default cases (defaultStack).  Simply wraps a list to give more
# stack-like syntax and enable initialization with an argument list
# (as opposed to an argument that's a list).

class Stack(list):
    def __init__(self, *items):
        list.__init__(self, items)

    def push(self, item):
        self.append(item)

    def top(self):
        return self[-1]

#######################
#
# ISA Parser
#   parses ISA DSL and emits C++ headers and source
#

class ISAParser(Grammar):
    class CpuModel(object):
        def __init__(self, name, filename, includes, strings):
            self.name = name
            self.filename = filename
            self.includes = includes
            self.strings = strings

    def __init__(self, output_dir):
        super(ISAParser, self).__init__()
        self.output_dir = output_dir

        self.filename = None # for output file watermarking/scaremongering

        self.cpuModels = [
            ISAParser.CpuModel('ExecContext',
                               'generic_cpu_exec.cc',
                               '#include "cpu/exec_context.hh"',
                               { "CPU_exec_context" : "ExecContext" }),
            ]

        # variable to hold templates
        self.templateMap = {}

        # This dictionary maps format name strings to Format objects.
        self.formatMap = {}

        # Track open files and, if applicable, how many chunks each has
        # been split into so far.
        self.files = {}
        self.splits = {}

        # isa_name / namespace identifier from namespace declaration.
        # Before the namespace declaration is seen, both are None.
        self.isa_name = None
        self.namespace = None

        # The format stack.
        self.formatStack = Stack(NoFormat())

        # The default case stack.
        self.defaultStack = Stack(None)

        # Stack that tracks current file and line number.  Each
        # element is a tuple (filename, lineno) that records the
        # *current* filename and the line number in the *previous*
        # file where it was included.
        self.fileNameStack = Stack()

        symbols = ('makeList', 're', 'string')
        self.exportContext = dict([(s, eval(s)) for s in symbols])

        self.maxInstSrcRegs = 0
        self.maxInstDestRegs = 0
        self.maxMiscDestRegs = 0

    def __getitem__(self, i):    # Allow object (self) to be
        return getattr(self, i)  # passed to %-substitutions

    # Change the file suffix of a base filename:
    #   (e.g.) decoder.cc -> decoder-g.cc.inc for 'global' outputs
    def suffixize(self, s, sec):
        extn = re.compile('(\.[^\.]+)$') # isolate extension
        if self.namespace:
            return extn.sub(r'-ns\1.inc', s) # insert some text on either side
        else:
            return extn.sub(r'-g\1.inc', s)

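    # Illustrative example (not part of the original source): before the
    # namespace declaration has been seen, suffixize('decoder.cc', 'decoder')
    # yields 'decoder-g.cc.inc' (a 'global' chunk); once self.namespace is
    # set it yields 'decoder-ns.cc.inc' (a chunk emitted inside the ISA
    # namespace).
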
    # Get the file object for emitting code into the specified section
    # (header, decoder, exec, decode_block).
    def get_file(self, section):
        if section == 'decode_block':
            filename = 'decode-method.cc.inc'
        else:
            if section == 'header':
                file = 'decoder.hh'
            else:
                file = '%s.cc' % section
            filename = self.suffixize(file, section)
        try:
            return self.files[filename]
        except KeyError: pass

        f = self.open(filename)
        self.files[filename] = f

        # The splittable files are the ones with many independent
        # per-instruction functions - the decoder's instruction constructors
        # and the instruction execution (execute()) methods. These both have
        # the suffix -ns.cc.inc, meaning they are within the namespace part
        # of the ISA, contain object-emitting C++ source, and are included
        # into other top-level files. These are the files that need special
        # #define's to allow parts of them to be compiled separately. Rather
        # than splitting the emissions into separate files, the monolithic
        # output of the ISA parser is maintained, but the value (or lack
        # thereof) of the __SPLIT definition during C preprocessing will
        # select the different chunks. If no 'split' directives are used,
        # the cpp emissions have no effect.
        if re.search('-ns.cc.inc$', filename):
            print >>f, '#if !defined(__SPLIT) || (__SPLIT == 1)'
            self.splits[f] = 1
        # ensure requisite #include's
        elif filename in ['decoder-g.cc.inc', 'exec-g.cc.inc']:
            print >>f, '#include "decoder.hh"'
        elif filename == 'decoder-g.hh.inc':
            print >>f, '#include "base/bitfield.hh"'

        return f

    # Weave together the parts of the different output sections by
    # #include'ing them into some very short top-level .cc/.hh files.
    # These small files make it much clearer how this tool works, since
    # you directly see the chunks emitted as files that are #include'd.
    def write_top_level_files(self):
        dep = self.open('inc.d', bare=True)

        # decoder header - everything depends on this
        file = 'decoder.hh'
        with self.open(file) as f:
            inc = []

            fn = 'decoder-g.hh.inc'
            assert(fn in self.files)
            f.write('#include "%s"\n' % fn)
            inc.append(fn)

            fn = 'decoder-ns.hh.inc'
            assert(fn in self.files)
            f.write('namespace %s {\n#include "%s"\n}\n'
                    % (self.namespace, fn))
            inc.append(fn)

            print >>dep, file+':', ' '.join(inc)

        # decoder method - cannot be split
        file = 'decoder.cc'
        with self.open(file) as f:
            inc = []

            fn = 'decoder-g.cc.inc'
            assert(fn in self.files)
            f.write('#include "%s"\n' % fn)
            inc.append(fn)

            fn = 'decode-method.cc.inc'
            # is guaranteed to have been written for the parse to complete
            f.write('#include "%s"\n' % fn)
            inc.append(fn)

            inc.append("decoder.hh")
            print >>dep, file+':', ' '.join(inc)

        extn = re.compile('(\.[^\.]+)$')

        # instruction constructors
        splits = self.splits[self.get_file('decoder')]
        file_ = 'inst-constrs.cc'
        for i in range(1, splits+1):
            if splits > 1:
                file = extn.sub(r'-%d\1' % i, file_)
            else:
                file = file_
            with self.open(file) as f:
                inc = []

                fn = 'decoder-g.cc.inc'
                assert(fn in self.files)
                f.write('#include "%s"\n' % fn)
                inc.append(fn)

                fn = 'decoder-ns.cc.inc'
                assert(fn in self.files)
                print >>f, 'namespace %s {' % self.namespace
                if splits > 1:
                    print >>f, '#define __SPLIT %u' % i
                print >>f, '#include "%s"' % fn
                print >>f, '}'
                inc.append(fn)

                inc.append("decoder.hh")
                print >>dep, file+':', ' '.join(inc)

        # instruction execution per-CPU model
        splits = self.splits[self.get_file('exec')]
        for cpu in self.cpuModels:
            for i in range(1, splits+1):
                if splits > 1:
                    file = extn.sub(r'_%d\1' % i, cpu.filename)
                else:
                    file = cpu.filename
                with self.open(file) as f:
                    inc = []

                    fn = 'exec-g.cc.inc'
                    assert(fn in self.files)
                    f.write('#include "%s"\n' % fn)
                    inc.append(fn)

                    f.write(cpu.includes+"\n")

                    fn = 'exec-ns.cc.inc'
                    assert(fn in self.files)
                    print >>f, 'namespace %s {' % self.namespace
                    print >>f, '#define CPU_EXEC_CONTEXT %s' \
                               % cpu.strings['CPU_exec_context']
                    if splits > 1:
                        print >>f, '#define __SPLIT %u' % i
                    print >>f, '#include "%s"' % fn
                    print >>f, '}'
                    inc.append(fn)

                    inc.append("decoder.hh")
                    print >>dep, file+':', ' '.join(inc)

        # max_inst_regs.hh
        self.update('max_inst_regs.hh',
                    '''namespace %(namespace)s {
    const int MaxInstSrcRegs = %(maxInstSrcRegs)d;
    const int MaxInstDestRegs = %(maxInstDestRegs)d;
    const int MaxMiscDestRegs = %(maxMiscDestRegs)d;\n}\n''' % self)
        print >>dep, 'max_inst_regs.hh:'

        dep.close()


    scaremonger_template = '''// DO NOT EDIT
// This file was automatically generated from an ISA description:
//   %(filename)s

'''

1425    #####################################################################
1426    #
1427    #                                Lexer
1428    #
1429    # The PLY lexer module takes two things as input:
1430    # - A list of token names (the string list 'tokens')
1431    # - A regular expression describing a match for each token.  The
1432    #   regexp for token FOO can be provided in two ways:
1433    #   - as a string variable named t_FOO
1434    #   - as the doc string for a function named t_FOO.  In this case,
1435    #     the function is also executed, allowing an action to be
1436    #     associated with each token match.
1437    #
1438    #####################################################################
1439
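    # As a concrete illustration (the field name OPCODE is made up), a
    # description line such as
    #     def bitfield OPCODE <31:26>;
    # is tokenized by the rules below as
    #     DEF BITFIELD ID(OPCODE) LESS INTLIT(31) COLON INTLIT(26)
    #     GREATER SEMI
    # where 'def' and 'bitfield' come back as reserved-word tokens rather
    # than generic IDs (see t_ID below).
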
1440    # Reserved words.  These are listed separately as they are matched
1441    # using the same regexp as generic IDs, but distinguished in the
1442    # t_ID() function.  The PLY documentation suggests this approach.
1443    reserved = (
1444        'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
1445        'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
1446        'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE'
1447        )
1448
1449    # List of tokens.  The lex module requires this.
1450    tokens = reserved + (
1451        # identifier
1452        'ID',
1453
1454        # integer literal
1455        'INTLIT',
1456
1457        # string literal
1458        'STRLIT',
1459
1460        # code literal
1461        'CODELIT',
1462
1463        # ( ) [ ] { } < > = , ; . : :: *
1464        'LPAREN', 'RPAREN',
1465        'LBRACKET', 'RBRACKET',
1466        'LBRACE', 'RBRACE',
1467        'LESS', 'GREATER', 'EQUALS',
1468        'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
1469        'ASTERISK',
1470
1471        # C preprocessor directives
1472        'CPPDIRECTIVE'
1473
1474    # The following are matched but never returned. commented out to
1475    # suppress PLY warning
1476        # newfile directive
1477    #    'NEWFILE',
1478
1479        # endfile directive
1480    #    'ENDFILE'
1481    )
1482
1483    # Regular expressions for token matching
1484    t_LPAREN           = r'\('
1485    t_RPAREN           = r'\)'
1486    t_LBRACKET         = r'\['
1487    t_RBRACKET         = r'\]'
1488    t_LBRACE           = r'\{'
1489    t_RBRACE           = r'\}'
1490    t_LESS             = r'\<'
1491    t_GREATER          = r'\>'
1492    t_EQUALS           = r'='
1493    t_COMMA            = r','
1494    t_SEMI             = r';'
1495    t_DOT              = r'\.'
1496    t_COLON            = r':'
1497    t_DBLCOLON         = r'::'
1498    t_ASTERISK         = r'\*'
1499
1500    # Identifiers and reserved words
1501    reserved_map = { }
1502    for r in reserved:
1503        reserved_map[r.lower()] = r
1504
1505    def t_ID(self, t):
1506        r'[A-Za-z_]\w*'
1507        t.type = self.reserved_map.get(t.value, 'ID')
1508        return t
1509
1510    # Integer literal
1511    def t_INTLIT(self, t):
1512        r'-?(0x[\da-fA-F]+|\d+)'
1513        try:
1514            t.value = int(t.value,0)
1515        except ValueError:
1516            error(t, 'Integer value "%s" too large' % t.value)
1517            t.value = 0
1518        return t
1519
1520    # String literal.  Note that these use only single quotes, and
1521    # can span multiple lines.
1522    def t_STRLIT(self, t):
1523        r"(?m)'([^'])+'"
1524        # strip off quotes
1525        t.value = t.value[1:-1]
1526        t.lexer.lineno += t.value.count('\n')
1527        return t
1528
1529
1530    # "Code literal"... like a string literal, but delimiters are
1531    # '{{' and '}}' so they get formatted nicely under emacs c-mode
1532    def t_CODELIT(self, t):
1533        r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
1534        # strip off {{ & }}
1535        t.value = t.value[2:-2]
1536        t.lexer.lineno += t.value.count('\n')
1537        return t
1538
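    # For illustration (hypothetical values, not from any real ISA file):
    #     'IntAlu'              -> STRLIT whose value is IntAlu (quotes
    #                              stripped)
    #     {{ Rd = Rs1 + Rs2; }} -> CODELIT whose value is the C++ between
    #                              the double braces (Rd/Rs1/Rs2 are
    #                              made-up operand names)
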
1539    def t_CPPDIRECTIVE(self, t):
1540        r'^\#[^\#].*\n'
1541        t.lexer.lineno += t.value.count('\n')
1542        return t
1543
1544    def t_NEWFILE(self, t):
1545        r'^\#\#newfile\s+"[^"]*"'
1546        self.fileNameStack.push((t.value[11:-1], t.lexer.lineno))
1547        t.lexer.lineno = 0
1548
1549    def t_ENDFILE(self, t):
1550        r'^\#\#endfile'
1551        (old_filename, t.lexer.lineno) = self.fileNameStack.pop()
1552
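    # The ##newfile/##endfile markers are inserted by read_and_flatten()
    # when '##include' directives are expanded, e.g. (file name
    # hypothetical):
    #     ##newfile "arch/foo/decoder.isa"
    #     ...included contents...
    #     ##endfile
    # so that error messages can report the correct file and line.
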
1553    #
1554    # The functions t_NEWLINE, t_ignore, and t_error are
1555    # special for the lex module.
1556    #
1557
1558    # Newlines
1559    def t_NEWLINE(self, t):
1560        r'\n+'
1561        t.lexer.lineno += t.value.count('\n')
1562
1563    # Comments
1564    def t_comment(self, t):
1565        r'//.*'
1566
1567    # Completely ignored characters
1568    t_ignore = ' \t\x0c'
1569
1570    # Error handler
1571    def t_error(self, t):
1572        error(t, "illegal character '%s'" % t.value[0])
1573        t.skip(1)
1574
1575    #####################################################################
1576    #
1577    #                                Parser
1578    #
1579    # Every function whose name starts with 'p_' defines a grammar
1580    # rule.  The rule is encoded in the function's doc string, while
1581    # the function body provides the action taken when the rule is
1582    # matched.  The argument to each function is a list of the values
1583    # of the rule's symbols: t[0] for the LHS, and t[1..n] for the
1584    # symbols on the RHS.  For tokens, the value is copied from the
1585    # t.value attribute provided by the lexer.  For non-terminals, the
1586    # value is assigned by the producing rule; i.e., the job of the
1587    # grammar rule function is to set the value for the non-terminal
1588    # on the LHS (by assigning to t[0]).
1589    #####################################################################
1590
1591    # The LHS of the first grammar rule is used as the start symbol
1592    # (in this case, 'specification').  Note that this rule enforces
1593    # that there will be exactly one namespace declaration, with 0 or
1594    # more global defs/decls before and after it.  The defs & decls
1595    # before the namespace decl will be outside the namespace; those
1596    # after will be inside.  The decoder function is always inside the
1597    # namespace.
1598    def p_specification(self, t):
1599        'specification : opt_defs_and_outputs top_level_decode_block'
1600
1601        for f in self.splits.iterkeys():
1602            f.write('\n#endif\n')
1603
1604        for f in self.files.itervalues(): # close ALL the files;
1605            f.close() # not doing so can cause compilation to fail
1606
1607        self.write_top_level_files()
1608
1609        t[0] = True
1610
1611    # 'opt_defs_and_outputs' is a possibly empty sequence of def and/or
1612    # output statements.  Its productions do the hard work of eventually
1613    # instantiating GenCode objects, which are generally emitted (written to
1614    # disk) as soon as possible, except for the decode_block, which has to be
1615    # accumulated into one large function of nested switch/case blocks.
1616    def p_opt_defs_and_outputs_0(self, t):
1617        'opt_defs_and_outputs : empty'
1618
1619    def p_opt_defs_and_outputs_1(self, t):
1620        'opt_defs_and_outputs : defs_and_outputs'
1621
1622    def p_defs_and_outputs_0(self, t):
1623        'defs_and_outputs : def_or_output'
1624
1625    def p_defs_and_outputs_1(self, t):
1626        'defs_and_outputs : defs_and_outputs def_or_output'
1627
1628    # The list of possible definition/output statements.
1629    # They are all processed as they are seen.
1630    def p_def_or_output(self, t):
1631        '''def_or_output : name_decl
1632                         | def_format
1633                         | def_bitfield
1634                         | def_bitfield_struct
1635                         | def_template
1636                         | def_operand_types
1637                         | def_operands
1638                         | output
1639                         | global_let
1640                         | split'''
1641
1642    # Utility function used by both ways of requesting a split: the
1643    # explicit 'split' keyword and split() calls inside "let {{ }};" blocks.
1644    def split(self, sec, write=False):
1645        assert sec != 'header', "header cannot be split"
1646
1647        f = self.get_file(sec)
1648        self.splits[f] += 1
1649        s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f]
1650        if write:
1651            f.write(s)
1652        else:
1653            return s
1654
1655    # split output file to reduce compilation time
1656    def p_split(self, t):
1657        'split : SPLIT output_type SEMI'
1658        assert self.isa_name, "'split' not allowed before namespace decl"
1659
1660        self.split(t[2], True)
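
    # For example, a description could contain, among its top-level
    # definitions (after the namespace declaration),
    #     split decoder;
    # to start a new '#if __SPLIT == N' region in the generated decoder
    # source and so reduce per-file compilation time.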
1661
1662    def p_output_type(self, t):
1663        '''output_type : DECODER
1664                       | HEADER
1665                       | EXEC'''
1666        t[0] = t[1]
1667
1668    # ISA name declaration looks like "namespace <foo>;"
1669    def p_name_decl(self, t):
1670        'name_decl : NAMESPACE ID SEMI'
1671        assert self.isa_name is None, "Only 1 namespace decl permitted"
1672        self.isa_name = t[2]
1673        self.namespace = t[2] + 'Inst'
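
    # E.g., a declaration such as "namespace Alpha;" (ISA name chosen only
    # for illustration) sets isa_name to 'Alpha' and places the generated
    # code in namespace 'AlphaInst'.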
1674
1675    # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
1676    # directly to the appropriate output section.
1677
1678    # Massage output block by substituting in template definitions and
1679    # bit operators.  We handle '%'s embedded in the string that don't
1680    # indicate template substitutions (or CPU-specific symbols, which
1681    # get handled in GenCode) by doubling them first so that the
1682    # format operation will reduce them back to single '%'s.
1683    def process_output(self, s):
1684        s = self.protectNonSubstPercents(s)
1685        # protects cpu-specific symbols too
1686        s = self.protectCpuSymbols(s)
1687        return substBitOps(s % self.templateMap)
1688
1689    def p_output(self, t):
1690        'output : OUTPUT output_type CODELIT SEMI'
1691        kwargs = { t[2]+'_output' : self.process_output(t[3]) }
1692        GenCode(self, **kwargs).emit()
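
    # For instance, a made-up block such as
    #     output header {{
    #         #include "arch/foo/helpers.hh"
    #     }};
    # copies the enclosed C++ into the generated header section (after
    # process_output() above applies template and bitfield-operator
    # substitution).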
1693
1694    # global let blocks 'let {{...}}' (Python code blocks) are
1695    # executed directly when seen.  Note that these execute in a
1696    # special variable context 'exportContext' to prevent the code
1697    # from polluting this script's namespace.
1698    def p_global_let(self, t):
1699        'global_let : LET CODELIT SEMI'
1700        def _split(sec):
1701            return self.split(sec)
1702        self.updateExportContext()
1703        self.exportContext["header_output"] = ''
1704        self.exportContext["decoder_output"] = ''
1705        self.exportContext["exec_output"] = ''
1706        self.exportContext["decode_block"] = ''
1707        self.exportContext["split"] = _split
1708        split_setup = '''
1709def wrap(func):
1710    def split(sec):
1711        globals()[sec + '_output'] += func(sec)
1712    return split
1713split = wrap(split)
1714del wrap
1715'''
1716        # This tricky setup (immediately above) allows us to just write
1717        # (e.g.) "split('exec')" in the Python code and the split #ifdef's
1718        # will automatically be added to the exec_output variable. The inner
1719        # Python execution environment doesn't know about the split points,
1720        # so we carefully inject and wrap a closure that can retrieve the
1721        # next split's #define from the parser and add it to the current
1722        # emission-in-progress.
1723        try:
1724            exec split_setup+fixPythonIndentation(t[2]) in self.exportContext
1725        except Exception, exc:
1726            if debug:
1727                raise
1728            error(t, 'error: %s in global let block "%s".' % (exc, t[2]))
1729        GenCode(self,
1730                header_output=self.exportContext["header_output"],
1731                decoder_output=self.exportContext["decoder_output"],
1732                exec_output=self.exportContext["exec_output"],
1733                decode_block=self.exportContext["decode_block"]).emit()
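
    # A minimal, made-up example of a global let block:
    #     let {{
    #         header_output = 'class FooHelper {};'
    #     }};
    # Whatever the block assigns to header_output, decoder_output,
    # exec_output or decode_block is picked up from exportContext and
    # emitted via GenCode above.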
1734
1735    # Define the mapping from operand type extensions to C++ types and
1736    # bit widths (stored in operandTypeMap).
1737    def p_def_operand_types(self, t):
1738        'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
1739        try:
1740            self.operandTypeMap = eval('{' + t[3] + '}')
1741        except Exception, exc:
1742            if debug:
1743                raise
1744            error(t,
1745                  'error: %s in def operand_types block "%s".' % (exc, t[3]))
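
    # A hypothetical operand_types block (the exact entries are
    # ISA-specific; these extensions and types are only illustrative):
    #     def operand_types {{
    #         'sw' : 'int32_t',
    #         'ud' : 'uint64_t'
    #     }};
    # The body is evaluated as a Python dict literal and stored in
    # operandTypeMap.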
1746
1747    # Define the mapping from operand names to operand classes and
1748    # other traits.  Stored in operandNameMap.
1749    def p_def_operands(self, t):
1750        'def_operands : DEF OPERANDS CODELIT SEMI'
1751        if not hasattr(self, 'operandTypeMap'):
1752            error(t, 'error: operand types must be defined before operands')
1753        try:
1754            user_dict = eval('{' + t[3] + '}', self.exportContext)
1755        except Exception, exc:
1756            if debug:
1757                raise
1758            error(t, 'error: %s in def operands block "%s".' % (exc, t[3]))
1759        self.buildOperandNameMap(user_dict, t.lexer.lineno)
1760
1761    # A bitfield definition looks like:
1762    # 'def [signed] bitfield <ID> [<first>:<last>]'
1763    # This generates a preprocessor macro in the output file.
1764    def p_def_bitfield_0(self, t):
1765        'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
1766        expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
1767        if (t[2] == 'signed'):
1768            expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
1769        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1770        GenCode(self, header_output=hash_define).emit()
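
    # For example, a declaration like (field name hypothetical)
    #     def bitfield OPCODE <31:26>;
    # produces
    #     #undef OPCODE
    #     #define OPCODE bits(machInst, 31, 26)
    # while the 'signed' variant wraps the expression in sext<N>().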
1771
1772    # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
1773    def p_def_bitfield_1(self, t):
1774        'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
1775        expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
1776        if (t[2] == 'signed'):
1777            expr = 'sext<%d>(%s)' % (1, expr)
1778        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1779        GenCode(self, header_output=hash_define).emit()
1780
1781    # alternate form for structure member: 'def bitfield <ID> <ID>'
1782    def p_def_bitfield_struct(self, t):
1783        'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
1784        if (t[2] != ''):
1785            error(t, 'error: structure bitfields are always unsigned.')
1786        expr = 'machInst.%s' % t[5]
1787        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1788        GenCode(self, header_output=hash_define).emit()
1789
1790    def p_id_with_dot_0(self, t):
1791        'id_with_dot : ID'
1792        t[0] = t[1]
1793
1794    def p_id_with_dot_1(self, t):
1795        'id_with_dot : ID DOT id_with_dot'
1796        t[0] = t[1] + t[2] + t[3]
1797
1798    def p_opt_signed_0(self, t):
1799        'opt_signed : SIGNED'
1800        t[0] = t[1]
1801
1802    def p_opt_signed_1(self, t):
1803        'opt_signed : empty'
1804        t[0] = ''
1805
1806    def p_def_template(self, t):
1807        'def_template : DEF TEMPLATE ID CODELIT SEMI'
1808        if t[3] in self.templateMap:
1809            print "warning: template %s already defined" % t[3]
1810        self.templateMap[t[3]] = Template(self, t[4])
1811
1812    # An instruction format definition looks like
1813    # "def format <fmt>(<params>) {{...}};"
1814    def p_def_format(self, t):
1815        'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
1816        (id, params, code) = (t[3], t[5], t[7])
1817        self.defFormat(id, params, code, t.lexer.lineno)
1818
1819    # The formal parameter list for an instruction format is a
1820    # possibly empty list of comma-separated parameters.  Positional
1821    # (standard, non-keyword) parameters must come first, followed by
1822    # keyword parameters, followed by a '*foo' parameter that gets
1823    # excess positional arguments (as in Python).  Each of these three
1824    # parameter categories is optional.
1825    #
1826    # Note that we do not support the '**foo' parameter for collecting
1827    # otherwise undefined keyword args.  Otherwise the parameter list
1828    # is (I believe) identical to what is supported in Python.
1829    #
1830    # The param list generates a tuple, where the first element is a
1831    # list of the positional params and the second element is a dict
1832    # containing the keyword params.
1833    def p_param_list_0(self, t):
1834        'param_list : positional_param_list COMMA nonpositional_param_list'
1835        t[0] = t[1] + t[3]
1836
1837    def p_param_list_1(self, t):
1838        '''param_list : positional_param_list
1839                      | nonpositional_param_list'''
1840        t[0] = t[1]
1841
1842    def p_positional_param_list_0(self, t):
1843        'positional_param_list : empty'
1844        t[0] = []
1845
1846    def p_positional_param_list_1(self, t):
1847        'positional_param_list : ID'
1848        t[0] = [t[1]]
1849
1850    def p_positional_param_list_2(self, t):
1851        'positional_param_list : positional_param_list COMMA ID'
1852        t[0] = t[1] + [t[3]]
1853
1854    def p_nonpositional_param_list_0(self, t):
1855        'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
1856        t[0] = t[1] + t[3]
1857
1858    def p_nonpositional_param_list_1(self, t):
1859        '''nonpositional_param_list : keyword_param_list
1860                                    | excess_args_param'''
1861        t[0] = t[1]
1862
1863    def p_keyword_param_list_0(self, t):
1864        'keyword_param_list : keyword_param'
1865        t[0] = [t[1]]
1866
1867    def p_keyword_param_list_1(self, t):
1868        'keyword_param_list : keyword_param_list COMMA keyword_param'
1869        t[0] = t[1] + [t[3]]
1870
1871    def p_keyword_param(self, t):
1872        'keyword_param : ID EQUALS expr'
1873        t[0] = t[1] + ' = ' + repr(t[3])
1874
1875    def p_excess_args_param(self, t):
1876        'excess_args_param : ASTERISK ID'
1877        # Just concatenate them: '*ID'.  Wrap in list to be consistent
1878        # with positional_param_list and keyword_param_list.
1879        t[0] = [t[1] + t[2]]
1880
1881    # End of format definition-related rules.
1882    ##############
1883
1884    #
1885    # A decode block looks like:
1886    #       decode <field1> [, <field2>]* [default <inst>] { ... }
1887    #
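    # A small, made-up example (format and field names are illustrative):
    #
    #       decode OPCODE default Unknown::unknown() {
    #           0x0: Nop::nop();
    #           0x1: decode FUNC {
    #               0x2, 0x3: Alu::add({{ Rd = Rs1 + Rs2; }});
    #           }
    #       }
    #
    # Each case label becomes a 'case' in the switch generated on the
    # decode field, and nested decode blocks become nested switches.
    #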
1888    def p_top_level_decode_block(self, t):
1889        'top_level_decode_block : decode_block'
1890        codeObj = t[1]
1891        codeObj.wrap_decode_block('''
1892StaticInstPtr
1893%(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst)
1894{
1895    using namespace %(namespace)s;
1896''' % self, '}')
1897
1898        codeObj.emit()
1899
1900    def p_decode_block(self, t):
1901        'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
1902        default_defaults = self.defaultStack.pop()
1903        codeObj = t[5]
1904        # use the "default defaults" only if there was no explicit
1905        # default statement in decode_stmt_list
1906        if not codeObj.has_decode_default:
1907            codeObj += default_defaults
1908        codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
1909        t[0] = codeObj
1910
1911    # The opt_default statement serves only to push the "default
1912    # defaults" onto defaultStack.  This value will be used by nested
1913    # decode blocks, and used and popped off when the current
1914    # decode_block is processed (in p_decode_block() above).
1915    def p_opt_default_0(self, t):
1916        'opt_default : empty'
1917        # no default specified: reuse the one currently at the top of
1918        # the stack
1919        self.defaultStack.push(self.defaultStack.top())
1920        # no meaningful value returned
1921        t[0] = None
1922
1923    def p_opt_default_1(self, t):
1924        'opt_default : DEFAULT inst'
1925        # push the new default
1926        codeObj = t[2]
1927        codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
1928        self.defaultStack.push(codeObj)
1929        # no meaningful value returned
1930        t[0] = None
1931
1932    def p_decode_stmt_list_0(self, t):
1933        'decode_stmt_list : decode_stmt'
1934        t[0] = t[1]
1935
1936    def p_decode_stmt_list_1(self, t):
1937        'decode_stmt_list : decode_stmt decode_stmt_list'
1938        if (t[1].has_decode_default and t[2].has_decode_default):
1939            error(t, 'Two default cases in decode block')
1940        t[0] = t[1] + t[2]
1941
1942    #
1943    # Decode statement rules
1944    #
1945    # There are four types of statements allowed in a decode block:
1946    # 1. Format blocks 'format <foo> { ... }'
1947    # 2. Nested decode blocks
1948    # 3. Instruction definitions.
1949    # 4. C preprocessor directives.
1950
1951
1952    # Preprocessor directives found in a decode statement list are
1953    # passed through to the output, replicated to all of the output
1954    # code streams.  This works well for ifdefs, so we can ifdef out
1955    # both the declarations and the decode cases generated by an
1956    # instruction definition.  Handling them as part of the grammar
1957    # makes it easy to keep them in the right place with respect to
1958    # the code generated by the other statements.
1959    def p_decode_stmt_cpp(self, t):
1960        'decode_stmt : CPPDIRECTIVE'
1961        t[0] = GenCode(self, t[1], t[1], t[1], t[1])
1962
1963    # A format block 'format <foo> { ... }' sets the default
1964    # instruction format used to handle instruction definitions inside
1965    # the block.  This format can be overridden by using an explicit
1966    # format on the instruction definition or with a nested format
1967    # block.
1968    def p_decode_stmt_format(self, t):
1969        'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
1970        # The format will be pushed on the stack when 'push_format_id'
1971        # is processed (see below).  Once the parser has recognized
1972        # the full production (through the right brace), we're done
1973        # with the format, so now we can pop it.
1974        self.formatStack.pop()
1975        t[0] = t[4]
1976
1977    # This rule exists so we can set the current format (& push the
1978    # stack) when we recognize the format name part of the format
1979    # block.
1980    def p_push_format_id(self, t):
1981        'push_format_id : ID'
1982        try:
1983            self.formatStack.push(self.formatMap[t[1]])
1984            t[0] = ('', '// format %s' % t[1])
1985        except KeyError:
1986            error(t, 'instruction format "%s" not defined.' % t[1])
1987
1988    # Nested decode block: if the value of the current field matches
1989    # the specified constant, do a nested decode on some other field.
1990    def p_decode_stmt_decode(self, t):
1991        'decode_stmt : case_label COLON decode_block'
1992        label = t[1]
1993        codeObj = t[3]
1994        # just wrap the decoding code from the block as a case in the
1995        # outer switch statement.
1996        codeObj.wrap_decode_block('\n%s:\n' % label)
1997        codeObj.has_decode_default = (label == 'default')
1998        t[0] = codeObj
1999
2000    # Instruction definition (finally!).
2001    def p_decode_stmt_inst(self, t):
2002        'decode_stmt : case_label COLON inst SEMI'
2003        label = t[1]
2004        codeObj = t[3]
2005        codeObj.wrap_decode_block('\n%s:' % label, 'break;\n')
2006        codeObj.has_decode_default = (label == 'default')
2007        t[0] = codeObj
2008
2009    # The case label is either a list of one or more constants or
2010    # 'default'
2011    def p_case_label_0(self, t):
2012        'case_label : intlit_list'
2013        def make_case(intlit):
2014            if intlit >= 2**32:
2015                return 'case ULL(%#x)' % intlit
2016            else:
2017                return 'case %#x' % intlit
2018        t[0] = ': '.join(map(make_case, t[1]))
2019
2020    def p_case_label_1(self, t):
2021        'case_label : DEFAULT'
2022        t[0] = 'default'
2023
2024    #
2025    # The constant list for a decode case label must be non-empty, but
2026    # may have one or more comma-separated integer literals in it.
2027    #
2028    def p_intlit_list_0(self, t):
2029        'intlit_list : INTLIT'
2030        t[0] = [t[1]]
2031
2032    def p_intlit_list_1(self, t):
2033        'intlit_list : intlit_list COMMA INTLIT'
2034        t[0] = t[1]
2035        t[0].append(t[3])
2036
2037    # Define an instruction using the current instruction format
2038    # (specified by an enclosing format block).
2039    # "<mnemonic>(<args>)"
2040    def p_inst_0(self, t):
2041        'inst : ID LPAREN arg_list RPAREN'
2042        # Pass the ID and arg list to the current format class to deal with.
2043        currentFormat = self.formatStack.top()
2044        codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno)
2045        args = ','.join(map(str, t[3]))
2046        args = re.sub('(?m)^', '//', args)
2047        args = re.sub('^//', '', args)
2048        comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
2049        codeObj.prepend_all(comment)
2050        t[0] = codeObj
2051
2052    # Define an instruction using an explicitly specified format:
2053    # "<fmt>::<mnemonic>(<args>)"
2054    def p_inst_1(self, t):
2055        'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
2056        try:
2057            format = self.formatMap[t[1]]
2058        except KeyError:
2059            error(t, 'instruction format "%s" not defined.' % t[1])
2060
2061        codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno)
2062        comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
2063        codeObj.prepend_all(comment)
2064        t[0] = codeObj
2065
2066    # The arg list generates a tuple, where the first element is a
2067    # list of the positional args and the second element is a dict
2068    # containing the keyword args.
2069    def p_arg_list_0(self, t):
2070        'arg_list : positional_arg_list COMMA keyword_arg_list'
2071        t[0] = ( t[1], t[3] )
2072
2073    def p_arg_list_1(self, t):
2074        'arg_list : positional_arg_list'
2075        t[0] = ( t[1], {} )
2076
2077    def p_arg_list_2(self, t):
2078        'arg_list : keyword_arg_list'
2079        t[0] = ( [], t[1] )
2080
2081    def p_positional_arg_list_0(self, t):
2082        'positional_arg_list : empty'
2083        t[0] = []
2084
2085    def p_positional_arg_list_1(self, t):
2086        'positional_arg_list : expr'
2087        t[0] = [t[1]]
2088
2089    def p_positional_arg_list_2(self, t):
2090        'positional_arg_list : positional_arg_list COMMA expr'
2091        t[0] = t[1] + [t[3]]
2092
2093    def p_keyword_arg_list_0(self, t):
2094        'keyword_arg_list : keyword_arg'
2095        t[0] = t[1]
2096
2097    def p_keyword_arg_list_1(self, t):
2098        'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
2099        t[0] = t[1]
2100        t[0].update(t[3])
2101
2102    def p_keyword_arg(self, t):
2103        'keyword_arg : ID EQUALS expr'
2104        t[0] = { t[1] : t[3] }
2105
2106    #
2107    # Basic expressions.  These constitute the argument values of
2108    # "function calls" (i.e. instruction definitions in the decode
2109    # block) and default values for formal parameters of format
2110    # functions.
2111    #
2112    # Right now, these are either strings, integers, or (recursively)
2113    # lists of exprs (using Python square-bracket list syntax).  Note
2114    # that bare identifiers are treated as string constants here (since
2115    # there isn't really a variable namespace to refer to).
2116    #
2117    def p_expr_0(self, t):
2118        '''expr : ID
2119                | INTLIT
2120                | STRLIT
2121                | CODELIT'''
2122        t[0] = t[1]
2123
2124    def p_expr_1(self, t):
2125        '''expr : LBRACKET list_expr RBRACKET'''
2126        t[0] = t[2]
2127
2128    def p_list_expr_0(self, t):
2129        'list_expr : expr'
2130        t[0] = [t[1]]
2131
2132    def p_list_expr_1(self, t):
2133        'list_expr : list_expr COMMA expr'
2134        t[0] = t[1] + [t[3]]
2135
2136    def p_list_expr_2(self, t):
2137        'list_expr : empty'
2138        t[0] = []
2139
2140    #
2141    # Empty production... use in other rules for readability.
2142    #
2143    def p_empty(self, t):
2144        'empty :'
2145        pass
2146
2147    # Parse error handler.  Note that the argument here is the
2148    # offending *token*, not a grammar symbol (hence the need to use
2149    # t.value)
2150    def p_error(self, t):
2151        if t:
2152            error(t, "syntax error at '%s'" % t.value)
2153        else:
2154            error("unknown syntax error")
2155
2156    # END OF GRAMMAR RULES
2157
2158    def updateExportContext(self):
2159
2160        # create a continuation that allows us to grab the current parser
2161        def wrapInstObjParams(*args):
2162            return InstObjParams(self, *args)
2163        self.exportContext['InstObjParams'] = wrapInstObjParams
2164        self.exportContext.update(self.templateMap)
2165
2166    def defFormat(self, id, params, code, lineno):
2167        '''Define a new format'''
2168
2169        # make sure we haven't already defined this one
2170        if id in self.formatMap:
2171            error(lineno, 'format %s redefined.' % id)
2172
2173        # create new object and store in global map
2174        self.formatMap[id] = Format(id, params, code)
2175
2176    def expandCpuSymbolsToDict(self, template):
2177        '''Expand template with CPU-specific references into a
2178        dictionary with an entry for each CPU model name.  The entry
2179        key is the model name and the corresponding value is the
2180        template with the CPU-specific refs substituted for that
2181        model.'''
2182
2183        # Protect '%'s that don't go with CPU-specific terms
2184        t = re.sub(r'%(?!\(CPU_)', '%%', template)
2185        result = {}
2186        for cpu in self.cpuModels:
2187            result[cpu.name] = t % cpu.strings
2188        return result
2189
2190    def expandCpuSymbolsToString(self, template):
2191        '''*If* the template has CPU-specific references, return a
2192        single string containing a copy of the template for each CPU
2193        model with the corresponding values substituted in.  If the
2194        template has no CPU-specific references, it is returned
2195        unmodified.'''
2196
2197        if template.find('%(CPU_') != -1:
2198            return reduce(lambda x,y: x+y,
2199                          self.expandCpuSymbolsToDict(template).values())
2200        else:
2201            return template
2202
2203    def protectCpuSymbols(self, template):
2204        '''Protect CPU-specific references by doubling the
2205        corresponding '%'s (in preparation for substituting a different
2206        set of references into the template).'''
2207
2208        return re.sub(r'%(?=\(CPU_)', '%%', template)
2209
2210    def protectNonSubstPercents(self, s):
2211        '''Protect any non-dict-substitution '%'s in a format string
2212        (i.e. those not followed by '(')'''
2213
2214        return re.sub(r'%(?!\()', '%%', s)
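
        # For instance (illustrative input), 'a % b and %(name)s' would
        # come back as 'a %% b and %(name)s': only the bare '%' is doubled.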
2215
2216    def buildOperandNameMap(self, user_dict, lineno):
2217        operand_name = {}
2218        for op_name, val in user_dict.iteritems():
2219
2220            # Check if extra attributes have been specified.
2221            if len(val) > 9:
2222                error(lineno, 'error: too many attributes for operand "%s"' %
2223                      op_name)
2224
2225            # Pad val with None in case optional args are missing
2226            val += (None, None, None, None)
2227            base_cls_name, dflt_ext, reg_spec, flags, sort_pri, \
2228            read_code, write_code, read_predicate, write_predicate = val[:9]
2229
2230            # Canonical flag structure is a triple of lists, where each list
2231            # indicates the set of flags implied by this operand always, when
2232            # used as a source, and when used as a dest, respectively.
2233            # For simplicity this can be initialized using a variety of fairly
2234            # obvious shortcuts; we convert these to canonical form here.
2235            if not flags:
2236                # no flags specified (e.g., 'None')
2237                flags = ( [], [], [] )
2238            elif isinstance(flags, str):
2239                # a single flag: assumed to be unconditional
2240                flags = ( [ flags ], [], [] )
2241            elif isinstance(flags, list):
2242                # a list of flags: also assumed to be unconditional
2243                flags = ( flags, [], [] )
2244            elif isinstance(flags, tuple):
2245                # it's a tuple: it should be a triple,
2246                # but each item could be a single string or a list
2247                (uncond_flags, src_flags, dest_flags) = flags
2248                flags = (makeList(uncond_flags),
2249                         makeList(src_flags), makeList(dest_flags))
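
            # A few illustrative inputs and their canonical form (flag
            # names are hypothetical):
            #   'IsInteger'                 -> (['IsInteger'], [], [])
            #   ['IsInteger', 'IsFloating'] -> (['IsInteger', 'IsFloating'],
            #                                   [], [])
            #   ('IsInteger', 'IsLoad', 'IsStore')
            #                               -> (['IsInteger'], ['IsLoad'],
            #                                   ['IsStore'])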
2250
2251            # Accumulate attributes of new operand class in tmp_dict
2252            tmp_dict = {}
2253            attrList = ['reg_spec', 'flags', 'sort_pri',
2254                        'read_code', 'write_code',
2255                        'read_predicate', 'write_predicate']
2256            if dflt_ext:
2257                dflt_ctype = self.operandTypeMap[dflt_ext]
2258                attrList.extend(['dflt_ctype', 'dflt_ext'])
2259            for attr in attrList:
2260                tmp_dict[attr] = eval(attr)
2261            tmp_dict['base_name'] = op_name
2262
2263            # New class name will be e.g. "IntReg_Ra"
2264            cls_name = base_cls_name + '_' + op_name
2265            # Evaluate string arg to get class object.  Note that the
2266            # actual base class for "IntReg" is "IntRegOperand", i.e. we
2267            # have to append "Operand".
2268            try:
2269                base_cls = eval(base_cls_name + 'Operand')
2270            except NameError:
2271                error(lineno,
2272                      'error: unknown operand base class "%s"' % base_cls_name)
2273            # The following statement creates a new class called
2274            # <cls_name> as a subclass of <base_cls> with the attributes
2275            # in tmp_dict, just as if we evaluated a class declaration.
2276            operand_name[op_name] = type(cls_name, (base_cls,), tmp_dict)
2277
2278        self.operandNameMap = operand_name
2279
2280        # Define operand variables.
2281        operands = user_dict.keys()
2282        extensions = self.operandTypeMap.keys()
2283
2284        operandsREString = r'''
2285        (?<!\w)      # neg. lookbehind assertion: prevent partial matches
2286        ((%s)(?:_(%s))?)   # match: operand with optional '_' then suffix
2287        (?!\w)       # neg. lookahead assertion: prevent partial matches
2288        ''' % ('|'.join(operands), '|'.join(extensions))
2289
2290        self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
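
        # For example (made-up names), with operands ['Ra'] and extensions
        # ['ud'], this RE matches 'Ra' and 'Ra_ud' as whole words in a code
        # snippet but not 'Rax', thanks to the \w lookaround assertions.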
2291
2292        # Same as operandsREString, but extension is mandatory, and only two
2293        # groups are returned (base and ext, not full name as above).
2294        # Used for substituting '_' for '.' to make C++ identifiers.
2295        operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
2296            % ('|'.join(operands), '|'.join(extensions))
2297
2298        self.operandsWithExtRE = \
2299            re.compile(operandsWithExtREString, re.MULTILINE)
2300
2301    def substMungedOpNames(self, code):
2302        '''Munge operand names in code string to make legal C++
2303        variable names.  This means getting rid of the type extension
2304        if any.  (Matches the base_name attribute of the Operand object.)'''
2305        return self.operandsWithExtRE.sub(r'\1', code)
2306
2307    def mungeSnippet(self, s):
2308        '''Fix up code snippets for final substitution in templates.'''
2309        if isinstance(s, str):
2310            return self.substMungedOpNames(substBitOps(s))
2311        else:
2312            return s
2313
2314    def open(self, name, bare=False):
2315        '''Open the output file for writing and include scary warning.'''
2316        filename = os.path.join(self.output_dir, name)
2317        f = open(filename, 'w')
2318        if f:
2319            if not bare:
2320                f.write(ISAParser.scaremonger_template % self)
2321        return f
2322
2323    def update(self, file, contents):
2324        '''Update the output file only.  Scons should handle the case when
2325        the new contents are unchanged using its built-in hash feature.'''
2326        f = self.open(file)
2327        f.write(contents)
2328        f.close()
2329
2330    # This regular expression matches '##include' directives
2331    includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[^"]*)".*$',
2332                           re.MULTILINE)
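
    # For example, a line like (file name hypothetical)
    #     ##include "decoder.isa"
    # is replaced by the contents of that file, bracketed by ##newfile and
    # ##endfile markers (see replace_include below) so the lexer can keep
    # track of which file and line an error came from.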
2333
2334    def replace_include(self, matchobj, dirname):
2335        """Function to replace a matched '##include' directive with the
2336        contents of the specified file (with nested ##includes
2337        replaced recursively).  'matchobj' is an re match object
2338        (from a match of includeRE) and 'dirname' is the directory
2339        relative to which the file path should be resolved."""
2340
2341        fname = matchobj.group('filename')
2342        full_fname = os.path.normpath(os.path.join(dirname, fname))
2343        contents = '##newfile "%s"\n%s\n##endfile\n' % \
2344                   (full_fname, self.read_and_flatten(full_fname))
2345        return contents
2346
2347    def read_and_flatten(self, filename):
2348        """Read a file and recursively flatten nested '##include' files."""
2349
2350        current_dir = os.path.dirname(filename)
2351        try:
2352            contents = open(filename).read()
2353        except IOError:
2354            error('Error including file "%s"' % filename)
2355
2356        self.fileNameStack.push((filename, 0))
2357
2358        # Find any includes and include them
2359        def replace(matchobj):
2360            return self.replace_include(matchobj, current_dir)
2361        contents = self.includeRE.sub(replace, contents)
2362
2363        self.fileNameStack.pop()
2364        return contents
2365
2366    AlreadyGenerated = {}
2367
2368    def _parse_isa_desc(self, isa_desc_file):
2369        '''Read in and parse the ISA description.'''
2370
2371        # The build system can end up running the ISA parser twice: once to
2372        # finalize the build dependencies, and then to actually generate
2373        # the files it expects (in src/arch/$ARCH/generated). This code
2374        # doesn't do anything different either time, however; the SCons
2375        # invocations just expect different things. Since this code runs
2376        # within SCons, we can just remember that we've already run and
2377        # not perform a completely unnecessary run, since the ISA parser's
2378        # effect is idempotent.
2379        if isa_desc_file in ISAParser.AlreadyGenerated:
2380            return
2381
2382        # grab the last three path components of isa_desc_file
2383        self.filename = '/'.join(isa_desc_file.split('/')[-3:])
2384
2385        # Read file and (recursively) all included files into a string.
2386        # PLY requires that the input be in a single string so we have to
2387        # do this up front.
2388        isa_desc = self.read_and_flatten(isa_desc_file)
2389
2390        # Initialize filename stack with outer file.
2391        self.fileNameStack.push((isa_desc_file, 0))
2392
2393        # Parse.
2394        self.parse_string(isa_desc)
2395
2396        ISAParser.AlreadyGenerated[isa_desc_file] = None
2397
2398    def parse_isa_desc(self, *args, **kwargs):
2399        try:
2400            self._parse_isa_desc(*args, **kwargs)
2401        except ISAParserError, e:
2402            e.exit(self.fileNameStack)
2403
2404# Called as script: get args from command line.
2405# Args are: <isa desc file> <output dir>
2406if __name__ == '__main__':
2407    ISAParser(sys.argv[2]).parse_isa_desc(sys.argv[1])
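
# Example invocation (paths are hypothetical):
#     python isa_parser.py src/arch/foo/foo.isa build/FOO/arch/foo/generated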
2408