isa_parser.py revision 12234:78ece221f9f5
# Copyright (c) 2014, 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2003-2005 The Regents of The University of Michigan
# Copyright (c) 2013,2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt

from __future__ import with_statement
import os
import sys
import re
import string
import inspect, traceback
# get type names
from types import *

from m5.util.grammar import Grammar

debug=False

###################
# Utility functions

#
# Indent every line in string 's' by two spaces
# (except preprocessor directives).
# Used to make nested code blocks look pretty.
#
def indent(s):
    return re.sub(r'(?m)^(?!#)', '  ', s)

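# Example (an illustrative sketch; the C++ snippet is hypothetical):
#   >>> indent('x = 1;\n#if FULL_SYSTEM\ny = 2;')
#   '  x = 1;\n#if FULL_SYSTEM\n  y = 2;'
# Preprocessor lines (leading '#') are left at column zero.
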
#
# Munge a somewhat arbitrarily formatted piece of Python code
# (e.g. from a format 'let' block) into something whose indentation
# will get by the Python parser.
#
# The two keys here are that Python will give a syntax error if
# there's any whitespace at the beginning of the first line, and that
# all lines at the same lexical nesting level must have identical
# indentation.  Unfortunately the way code literals work, an entire
# let block tends to have some initial indentation.  Rather than
# trying to figure out what that is and strip it off, we prepend 'if
# 1:' to make the let code the nested block inside the if (and have
# the parser automatically deal with the indentation for us).
#
# We don't want to do this if (1) the code block is empty or (2) the
# first line of the block doesn't have any whitespace at the front.

def fixPythonIndentation(s):
    # get rid of blank lines first
    s = re.sub(r'(?m)^\s*\n', '', s);
    if (s != '' and re.match(r'[ \t]', s[0])):
        s = 'if 1:\n' + s
    return s

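# Example (an illustrative sketch with a made-up 'let' body):
#   >>> fixPythonIndentation('    x = 1\n\n    y = 2\n')
#   'if 1:\n    x = 1\n    y = 2\n'
# Blank lines are dropped and the indented block becomes the body of
# the 'if 1:', so the Python parser accepts it unchanged.
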
class ISAParserError(Exception):
    """Exception class for parser errors"""
    def __init__(self, first, second=None):
        if second is None:
            self.lineno = 0
            self.string = first
        else:
            self.lineno = first
            self.string = second

    def __str__(self):
        return self.string

def error(*args):
    raise ISAParserError(*args)

####################
# Template objects.
#
# Template objects are format strings that allow substitution from
# the attribute spaces of other objects (e.g. InstObjParams instances).

labelRE = re.compile(r'(?<!%)%\(([^\)]+)\)[sd]')

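# Example (an illustrative sketch; the template text is hypothetical):
#   >>> labelRE.findall('%(op_decl)s %(op_rd)s printf("100%%");')
#   ['op_decl', 'op_rd']
# The negative lookbehind keeps doubled percents (e.g. in C printf
# format strings) from being treated as substitution labels.
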
class Template(object):
    def __init__(self, parser, t):
        self.parser = parser
        self.template = t

    def subst(self, d):
        myDict = None

        # Protect non-Python-dict substitutions (e.g. if there's a printf
        # in the templated C++ code)
        template = self.parser.protectNonSubstPercents(self.template)

        # Build a dict ('myDict') to use for the template substitution.
        # Start with the template namespace.  Make a copy since we're
        # going to modify it.
        myDict = self.parser.templateMap.copy()

        if isinstance(d, InstObjParams):
            # If we're dealing with an InstObjParams object, we need
            # to be a little more sophisticated.  The instruction-wide
            # parameters are already formed, but the parameters which
            # are only function wide still need to be generated.
            compositeCode = ''

            myDict.update(d.__dict__)
            # The "operands" and "snippets" attributes of the InstObjParams
            # objects are for internal use and not substitution.
            del myDict['operands']
            del myDict['snippets']

            snippetLabels = [l for l in labelRE.findall(template)
                             if d.snippets.has_key(l)]

            snippets = dict([(s, self.parser.mungeSnippet(d.snippets[s]))
                             for s in snippetLabels])

            myDict.update(snippets)

            compositeCode = ' '.join(map(str, snippets.values()))

            # Add in template itself in case it references any
            # operands explicitly (like Mem)
            compositeCode += ' ' + template

            operands = SubOperandList(self.parser, compositeCode, d.operands)

            myDict['op_decl'] = operands.concatAttrStrings('op_decl')
            if operands.readPC or operands.setPC:
                myDict['op_decl'] += 'TheISA::PCState __parserAutoPCState;\n'

            # In case there are predicated register reads and writes, declare
            # the variables for register indices. It is being assumed that
            # all the operands in the OperandList are also in the
            # SubOperandList and in the same order. Otherwise, it is
            # expected that predication would not be used for the operands.
            if operands.predRead:
                myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
            if operands.predWrite:
                myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'

            is_src = lambda op: op.is_src
            is_dest = lambda op: op.is_dest

            myDict['op_src_decl'] = \
                      operands.concatSomeAttrStrings(is_src, 'op_src_decl')
            myDict['op_dest_decl'] = \
                      operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
            if operands.readPC:
                myDict['op_src_decl'] += \
                    'TheISA::PCState __parserAutoPCState;\n'
            if operands.setPC:
                myDict['op_dest_decl'] += \
                    'TheISA::PCState __parserAutoPCState;\n'

            myDict['op_rd'] = operands.concatAttrStrings('op_rd')
            if operands.readPC:
                myDict['op_rd'] = '__parserAutoPCState = xc->pcState();\n' + \
                                  myDict['op_rd']

            # Compose the op_wb string. If we're going to write back the
            # PC state because we changed some of its elements, we'll need to
            # do that as early as possible. That allows later uncoordinated
            # modifications to the PC to layer appropriately.
            reordered = list(operands.items)
            reordered.reverse()
            op_wb_str = ''
            pcWbStr = 'xc->pcState(__parserAutoPCState);\n'
            for op_desc in reordered:
                if op_desc.isPCPart() and op_desc.is_dest:
                    op_wb_str = op_desc.op_wb + pcWbStr + op_wb_str
                    pcWbStr = ''
                else:
                    op_wb_str = op_desc.op_wb + op_wb_str
            myDict['op_wb'] = op_wb_str

        elif isinstance(d, dict):
            # if the argument is a dictionary, we just use it.
            myDict.update(d)
        elif hasattr(d, '__dict__'):
            # if the argument is an object, we use its attribute map.
            myDict.update(d.__dict__)
        else:
            raise TypeError, "Template.subst() arg must be or have dictionary"
        return template % myDict

    # Convert to string.
    def __str__(self):
        return self.template

################
# Format object.
#
# A format object encapsulates an instruction format.  It must provide
# a defineInst() method that generates the code for an instruction
# definition.

class Format(object):
    def __init__(self, id, params, code):
        self.id = id
        self.params = params
        label = 'def format ' + id
        self.user_code = compile(fixPythonIndentation(code), label, 'exec')
        param_list = string.join(params, ", ")
        f = '''def defInst(_code, _context, %s):
                my_locals = vars().copy()
                exec _code in _context, my_locals
                return my_locals\n''' % param_list
        c = compile(f, label + ' wrapper', 'exec')
        exec c
        self.func = defInst

    def defineInst(self, parser, name, args, lineno):
        parser.updateExportContext()
        context = parser.exportContext.copy()
        if len(name):
            Name = name[0].upper()
            if len(name) > 1:
                Name += name[1:]
        context.update({ 'name' : name, 'Name' : Name })
        try:
            vars = self.func(self.user_code, context, *args[0], **args[1])
        except Exception, exc:
            if debug:
                raise
            error(lineno, 'error defining "%s": %s.' % (name, exc))
        for k in vars.keys():
            if k not in ('header_output', 'decoder_output',
                         'exec_output', 'decode_block'):
                del vars[k]
        return GenCode(parser, **vars)

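# For illustration (a sketch; the format name and parameter list are
# hypothetical): a format declared in the ISA description as
# 'def format Load(code, *opt_flags) {{ ... }}' yields
# Format('Load', ['code', '*opt_flags'], <let body>), whose compiled
# wrapper is equivalent to
#
#   def defInst(_code, _context, code, *opt_flags):
#       my_locals = vars().copy()
#       exec _code in _context, my_locals
#       return my_locals
#
# defineInst() then runs the format body with 'name'/'Name' bound and
# keeps only header_output, decoder_output, exec_output and
# decode_block from the resulting locals.
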
# Special null format to catch an implicit-format instruction
# definition outside of any format block.
class NoFormat(object):
    def __init__(self):
        self.defaultInst = ''

    def defineInst(self, parser, name, args, lineno):
        error(lineno,
              'instruction definition "%s" with no active format!' % name)

###############
# GenCode class
#
# The GenCode class encapsulates generated code destined for various
# output files.  The header_output and decoder_output attributes are
# strings containing code destined for decoder.hh and decoder.cc
# respectively.  The decode_block attribute contains code to be
# incorporated in the decode function itself (that will also end up in
# decoder.cc).  The exec_output attribute is the string of code for the
# exec.cc file.  The has_decode_default attribute is used in the decode block
# to allow explicit default clauses to override default default clauses.

class GenCode(object):
    # Constructor.
    def __init__(self, parser,
                 header_output = '', decoder_output = '', exec_output = '',
                 decode_block = '', has_decode_default = False):
        self.parser = parser
        self.header_output = header_output
        self.decoder_output = decoder_output
        self.exec_output = exec_output
        self.decode_block = decode_block
        self.has_decode_default = has_decode_default

    # Write these code chunks out to the filesystem.  They will be properly
    # interwoven by write_top_level_files().
    def emit(self):
        if self.header_output:
            self.parser.get_file('header').write(self.header_output)
        if self.decoder_output:
            self.parser.get_file('decoder').write(self.decoder_output)
        if self.exec_output:
            self.parser.get_file('exec').write(self.exec_output)
        if self.decode_block:
            self.parser.get_file('decode_block').write(self.decode_block)

    # Override '+' operator: generate a new GenCode object that
    # concatenates all the individual strings in the operands.
    def __add__(self, other):
        return GenCode(self.parser,
                       self.header_output + other.header_output,
                       self.decoder_output + other.decoder_output,
                       self.exec_output + other.exec_output,
                       self.decode_block + other.decode_block,
                       self.has_decode_default or other.has_decode_default)

    # Prepend a string (typically a comment) to all the strings.
    def prepend_all(self, pre):
        self.header_output = pre + self.header_output
        self.decoder_output  = pre + self.decoder_output
        self.decode_block = pre + self.decode_block
        self.exec_output  = pre + self.exec_output

    # Wrap the decode block in a pair of strings (e.g., 'case foo:'
    # and 'break;').  Used to build the big nested switch statement.
    def wrap_decode_block(self, pre, post = ''):
        self.decode_block = pre + indent(self.decode_block) + post

#####################################################################
#
#                      Bitfield Operator Support
#
#####################################################################

bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*>')

bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')

def substBitOps(code):
    # first convert single-bit selectors to two-index form
    # i.e., <n> --> <n:n>
    code = bitOp1ArgRE.sub(r'<\1:\1>', code)
    # simple case: selector applied to ID (name)
    # i.e., foo<a:b> --> bits(foo, a, b)
    code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
    # if selector is applied to expression (ending in ')'),
    # we need to search backward for matching '('
    match = bitOpExprRE.search(code)
    while match:
        exprEnd = match.start()
        here = exprEnd - 1
        nestLevel = 1
        while nestLevel > 0:
            if code[here] == '(':
                nestLevel -= 1
            elif code[here] == ')':
                nestLevel += 1
            here -= 1
            if here < 0:
                sys.exit("Didn't find '('!")
        exprStart = here+1
        newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
                                         match.group(1), match.group(2))
        code = code[:exprStart] + newExpr + code[match.end():]
        match = bitOpExprRE.search(code)
    return code

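# Example (an illustrative sketch with hypothetical operand names):
#   >>> substBitOps('Mem = Ra<3> + (Rb + Rc)<7:0>;')
#   'Mem = bits(Ra, 3, 3) + bits((Rb + Rc), 7, 0);'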

#####################################################################
#
#                             Code Parser
#
# The remaining code is the support for automatically extracting
# instruction characteristics from pseudocode.
#
#####################################################################

# Force the argument to be a list.  Useful for flags, where a caller
# can specify a singleton flag or a list of flags.  Also useful for
# converting tuples to lists so they can be modified.
def makeList(arg):
    if isinstance(arg, list):
        return arg
    elif isinstance(arg, tuple):
        return list(arg)
    elif not arg:
        return []
    else:
        return [ arg ]

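# Example (illustrative):
#   >>> makeList('IsLoad')
#   ['IsLoad']
#   >>> makeList(('IsLoad', 'IsMemRef'))
#   ['IsLoad', 'IsMemRef']
#   >>> makeList(None)
#   []
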
class Operand(object):
    '''Base class for operand descriptors.  An instance of this class
    (or actually a class derived from this one) represents a specific
    operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
    derived classes encapsulate the traits of a particular operand
    type (e.g., "32-bit integer register").'''

    def buildReadCode(self, func = None):
        subst_dict = {"name": self.base_name,
                      "func": func,
                      "reg_idx": self.reg_spec,
                      "ctype": self.ctype}
        if hasattr(self, 'src_reg_idx'):
            subst_dict['op_idx'] = self.src_reg_idx
        code = self.read_code % subst_dict
        return '%s = %s;\n' % (self.base_name, code)

    def buildWriteCode(self, func = None):
        subst_dict = {"name": self.base_name,
                      "func": func,
                      "reg_idx": self.reg_spec,
                      "ctype": self.ctype,
                      "final_val": self.base_name}
        if hasattr(self, 'dest_reg_idx'):
            subst_dict['op_idx'] = self.dest_reg_idx
        code = self.write_code % subst_dict
        return '''
        {
            %s final_val = %s;
            %s;
            if (traceData) { traceData->setData(final_val); }
        }''' % (self.dflt_ctype, self.base_name, code)

    def __init__(self, parser, full_name, ext, is_src, is_dest):
        self.full_name = full_name
        self.ext = ext
        self.is_src = is_src
        self.is_dest = is_dest
        # The 'effective extension' (eff_ext) is either the actual
        # extension, if one was explicitly provided, or the default.
        if ext:
            self.eff_ext = ext
        elif hasattr(self, 'dflt_ext'):
            self.eff_ext = self.dflt_ext

        if hasattr(self, 'eff_ext'):
            self.ctype = parser.operandTypeMap[self.eff_ext]

    # Finalize additional fields (primarily code fields).  This step
    # is done separately since some of these fields may depend on the
    # register index enumeration that hasn't been performed yet at the
    # time of __init__(). The register index enumeration is affected
    # by predicated register reads/writes. Hence, we forward the flags
    # that indicate whether or not predication is in use.
    def finalize(self, predRead, predWrite):
        self.flags = self.getFlags()
        self.constructor = self.makeConstructor(predRead, predWrite)
        self.op_decl = self.makeDecl()

        if self.is_src:
            self.op_rd = self.makeRead(predRead)
            self.op_src_decl = self.makeDecl()
        else:
            self.op_rd = ''
            self.op_src_decl = ''

        if self.is_dest:
            self.op_wb = self.makeWrite(predWrite)
            self.op_dest_decl = self.makeDecl()
        else:
            self.op_wb = ''
            self.op_dest_decl = ''

    def isMem(self):
        return 0

    def isReg(self):
        return 0

    def isFloatReg(self):
        return 0

    def isIntReg(self):
        return 0

    def isCCReg(self):
        return 0

    def isControlReg(self):
        return 0

    def isVecReg(self):
        return 0

    def isVecElem(self):
        return 0

    def isPCState(self):
        return 0

    def isPCPart(self):
        return self.isPCState() and self.reg_spec

    def hasReadPred(self):
        return self.read_predicate != None

    def hasWritePred(self):
        return self.write_predicate != None

    def getFlags(self):
        # note the empty slice '[:]' gives us a copy of self.flags[0]
        # instead of a reference to it
        my_flags = self.flags[0][:]
        if self.is_src:
            my_flags += self.flags[1]
        if self.is_dest:
            my_flags += self.flags[2]
        return my_flags

    def makeDecl(self):
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        return self.ctype + ' ' + self.base_name + ' = 0;\n';


src_reg_constructor = '\n\t_srcRegIdx[_numSrcRegs++] = RegId(%s, %s);'
dst_reg_constructor = '\n\t_destRegIdx[_numDestRegs++] = RegId(%s, %s);'

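# For illustration (a sketch; 'RA' stands for whatever register-index
# expression the ISA's operand map supplies as reg_spec):
#   src_reg_constructor % ('IntRegClass', 'RA')
# produces the constructor fragment
#   '\n\t_srcRegIdx[_numSrcRegs++] = RegId(IntRegClass, RA);'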

class IntRegOperand(Operand):
    reg_class = 'IntRegClass'

    def isReg(self):
        return 1

    def isIntReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
            if self.hasReadPred():
                c_src = '\n\tif (%s) {%s\n\t}' % \
                        (self.read_predicate, c_src)

        if self.is_dest:
            c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
            c_dest += '\n\t_numIntDestRegs++;'
            if self.hasWritePred():
                c_dest = '\n\tif (%s) {%s\n\t}' % \
                         (self.write_predicate, c_dest)

        return c_src + c_dest

    def makeRead(self, predRead):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read integer register as FP')
        if self.read_code != None:
            return self.buildReadCode('readIntRegOperand')

        int_reg_val = ''
        if predRead:
            int_reg_val = 'xc->readIntRegOperand(this, _sourceIndex++)'
            if self.hasReadPred():
                int_reg_val = '(%s) ? %s : 0' % \
                              (self.read_predicate, int_reg_val)
        else:
            int_reg_val = 'xc->readIntRegOperand(this, %d)' % self.src_reg_idx

        return '%s = %s;\n' % (self.base_name, int_reg_val)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write integer register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setIntRegOperand')

        if predWrite:
            wp = 'true'
            if self.hasWritePred():
                wp = self.write_predicate

            wcond = 'if (%s)' % (wp)
            windex = '_destIndex++'
        else:
            wcond = ''
            windex = '%d' % self.dest_reg_idx

        wb = '''
        %s
        {
            %s final_val = %s;
            xc->setIntRegOperand(this, %s, final_val);\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (wcond, self.ctype, self.base_name, windex)

        return wb

class FloatRegOperand(Operand):
    reg_class = 'FloatRegClass'

    def isReg(self):
        return 1

    def isFloatReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = src_reg_constructor % (self.reg_class, self.reg_spec)

        if self.is_dest:
            c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
            c_dest += '\n\t_numFPDestRegs++;'

        return c_src + c_dest

    def makeRead(self, predRead):
        bit_select = 0
        if (self.ctype == 'float' or self.ctype == 'double'):
            func = 'readFloatRegOperand'
        else:
            func = 'readFloatRegOperandBits'
        if self.read_code != None:
            return self.buildReadCode(func)

        if predRead:
            rindex = '_sourceIndex++'
        else:
            rindex = '%d' % self.src_reg_idx

        return '%s = xc->%s(this, %s);\n' % \
            (self.base_name, func, rindex)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            func = 'setFloatRegOperand'
        else:
            func = 'setFloatRegOperandBits'
        if self.write_code != None:
            return self.buildWriteCode(func)

        if predWrite:
            wp = '_destIndex++'
        else:
            wp = '%d' % self.dest_reg_idx
        wp = 'xc->%s(this, %s, final_val);' % (func, wp)

        wb = '''
        {
            %s final_val = %s;
            %s\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (self.ctype, self.base_name, wp)
        return wb

class VecRegOperand(Operand):
    reg_class = 'VecRegClass'

    def __init__(self, parser, full_name, ext, is_src, is_dest):
        Operand.__init__(self, parser, full_name, ext, is_src, is_dest)
        self.elemExt = None
        self.parser = parser

    def isReg(self):
        return 1

    def isVecReg(self):
        return 1

    def makeDeclElem(self, elem_op):
        (elem_name, elem_ext) = elem_op
        (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]
        if elem_ext:
            ext = elem_ext
        else:
            ext = dflt_elem_ext
        ctype = self.parser.operandTypeMap[ext]
        return '\n\t%s %s = 0;' % (ctype, elem_name)

    def makeDecl(self):
        if not self.is_dest and self.is_src:
            c_decl = '\t/* Vars for %s*/' % (self.base_name)
            if hasattr(self, 'active_elems'):
                if self.active_elems:
                    for elem in self.active_elems:
                        c_decl += self.makeDeclElem(elem)
            return c_decl + '\t/* End vars for %s */\n' % (self.base_name)
        else:
            return ''

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        numAccessNeeded = 1

        if self.is_src:
            c_src = src_reg_constructor % (self.reg_class, self.reg_spec)

        if self.is_dest:
            c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
            c_dest += '\n\t_numVecDestRegs++;'

        return c_src + c_dest

    # Read destination register to write
    def makeReadWElem(self, elem_op):
        (elem_name, elem_ext) = elem_op
        (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]
        if elem_ext:
            ext = elem_ext
        else:
            ext = dflt_elem_ext
        ctype = self.parser.operandTypeMap[ext]
        c_read = '\t\t%s& %s = %s[%s];\n' % \
                  (ctype, elem_name, self.base_name, elem_spec)
        return c_read

    def makeReadW(self, predWrite):
        func = 'getWritableVecRegOperand'
        if self.read_code != None:
            return self.buildReadCode(func)

        if predWrite:
            rindex = '_destIndex++'
        else:
            rindex = '%d' % self.dest_reg_idx

        c_readw = '\t\t%s& tmp_d%s = xc->%s(this, %s);\n'\
                % ('TheISA::VecRegContainer', rindex, func, rindex)
        if self.elemExt:
            c_readw += '\t\tauto %s = tmp_d%s.as<%s>();\n' % (self.base_name,
                        rindex, self.parser.operandTypeMap[self.elemExt])
        if self.ext:
            c_readw += '\t\tauto %s = tmp_d%s.as<%s>();\n' % (self.base_name,
                        rindex, self.parser.operandTypeMap[self.ext])
        if hasattr(self, 'active_elems'):
            if self.active_elems:
                for elem in self.active_elems:
                    c_readw += self.makeReadWElem(elem)
        return c_readw

    # Normal source operand read
    def makeReadElem(self, elem_op, name):
        (elem_name, elem_ext) = elem_op
        (elem_spec, dflt_elem_ext, zeroing) = self.elems[elem_name]

        if elem_ext:
            ext = elem_ext
        else:
            ext = dflt_elem_ext
        ctype = self.parser.operandTypeMap[ext]
        c_read = '\t\t%s = %s[%s];\n' % \
                  (elem_name, name, elem_spec)
        return c_read

    def makeRead(self, predRead):
        func = 'readVecRegOperand'
        if self.read_code != None:
            return self.buildReadCode(func)

        if predRead:
            rindex = '_sourceIndex++'
        else:
            rindex = '%d' % self.src_reg_idx

        name = self.base_name
        if self.is_dest and self.is_src:
            name += '_merger'

        c_read =  '\t\t%s& tmp_s%s = xc->%s(this, %s);\n' \
                % ('const TheISA::VecRegContainer', rindex, func, rindex)
        # If the parser has detected that elements are being accessed, create
        # the appropriate view
        if self.elemExt:
            c_read += '\t\tauto %s = tmp_s%s.as<%s>();\n' % \
                 (name, rindex, self.parser.operandTypeMap[self.elemExt])
        if self.ext:
            c_read += '\t\tauto %s = tmp_s%s.as<%s>();\n' % \
                 (name, rindex, self.parser.operandTypeMap[self.ext])
        if hasattr(self, 'active_elems'):
            if self.active_elems:
                for elem in self.active_elems:
                    c_read += self.makeReadElem(elem, name)
        return c_read

    def makeWrite(self, predWrite):
        func = 'setVecRegOperand'
        if self.write_code != None:
            return self.buildWriteCode(func)

        wb = '''
        if (traceData) {
            warn_once("Vectors not supported yet in tracedata");
            /*traceData->setData(final_val);*/
        }
        '''
        return wb

    def finalize(self, predRead, predWrite):
        super(VecRegOperand, self).finalize(predRead, predWrite)
        if self.is_dest:
            self.op_rd = self.makeReadW(predWrite) + self.op_rd

class VecElemOperand(Operand):
    reg_class = 'VectorElemClass'

    def isReg(self):
        return 1

    def isVecElem(self):
        return 1

    def makeDecl(self):
        if self.is_dest and not self.is_src:
            return '\n\t%s %s;' % (self.ctype, self.base_name)
        else:
            return ''

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        numAccessNeeded = 1
        regId = 'RegId(%s, %s * numVecElemPerVecReg + elemIdx)' % \
                (self.reg_class, self.reg_spec)

        if self.is_src:
            c_src = ('\n\t_srcRegIdx[_numSrcRegs++] = RegId(%s, %s, %s);' %
                    (self.reg_class, self.reg_spec, self.elem_spec))

        if self.is_dest:
            c_dest = ('\n\t_destRegIdx[_numDestRegs++] = RegId(%s, %s, %s);' %
                    (self.reg_class, self.reg_spec, self.elem_spec))
            c_dest += '\n\t_numVecElemDestRegs++;'
        return c_src + c_dest

    def makeRead(self, predRead):
        c_read = ('\n/* Elem is kept inside the operand description */' +
                  '\n\tVecElem %s = xc->readVecElemOperand(this, %d);' %
                  (self.base_name, self.src_reg_idx))
        return c_read

    def makeWrite(self, predWrite):
        c_write = ('\n/* Elem is kept inside the operand description */' +
                   '\n\txc->setVecElemOperand(this, %d, %s);' %
                   (self.dest_reg_idx, self.base_name))
        return c_write

class CCRegOperand(Operand):
    reg_class = 'CCRegClass'

    def isReg(self):
        return 1

    def isCCReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = src_reg_constructor % (self.reg_class, self.reg_spec)
            if self.hasReadPred():
                c_src = '\n\tif (%s) {%s\n\t}' % \
                        (self.read_predicate, c_src)

        if self.is_dest:
            c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)
            c_dest += '\n\t_numCCDestRegs++;'
            if self.hasWritePred():
                c_dest = '\n\tif (%s) {%s\n\t}' % \
                         (self.write_predicate, c_dest)

        return c_src + c_dest

    def makeRead(self, predRead):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read condition-code register as FP')
        if self.read_code != None:
            return self.buildReadCode('readCCRegOperand')

        int_reg_val = ''
        if predRead:
            int_reg_val = 'xc->readCCRegOperand(this, _sourceIndex++)'
            if self.hasReadPred():
                int_reg_val = '(%s) ? %s : 0' % \
                              (self.read_predicate, int_reg_val)
        else:
            int_reg_val = 'xc->readCCRegOperand(this, %d)' % self.src_reg_idx

        return '%s = %s;\n' % (self.base_name, int_reg_val)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write condition-code register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setCCRegOperand')

        if predWrite:
            wp = 'true'
            if self.hasWritePred():
                wp = self.write_predicate

            wcond = 'if (%s)' % (wp)
            windex = '_destIndex++'
        else:
            wcond = ''
            windex = '%d' % self.dest_reg_idx

        wb = '''
        %s
        {
            %s final_val = %s;
            xc->setCCRegOperand(this, %s, final_val);\n
            if (traceData) { traceData->setData(final_val); }
        }''' % (wcond, self.ctype, self.base_name, windex)

        return wb

class ControlRegOperand(Operand):
    reg_class = 'MiscRegClass'

    def isReg(self):
        return 1

    def isControlReg(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        c_src = ''
        c_dest = ''

        if self.is_src:
            c_src = src_reg_constructor % (self.reg_class, self.reg_spec)

        if self.is_dest:
            c_dest = dst_reg_constructor % (self.reg_class, self.reg_spec)

        return c_src + c_dest

    def makeRead(self, predRead):
        bit_select = 0
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to read control register as FP')
        if self.read_code != None:
            return self.buildReadCode('readMiscRegOperand')

        if predRead:
            rindex = '_sourceIndex++'
        else:
            rindex = '%d' % self.src_reg_idx

        return '%s = xc->readMiscRegOperand(this, %s);\n' % \
            (self.base_name, rindex)

    def makeWrite(self, predWrite):
        if (self.ctype == 'float' or self.ctype == 'double'):
            error('Attempt to write control register as FP')
        if self.write_code != None:
            return self.buildWriteCode('setMiscRegOperand')

        if predWrite:
            windex = '_destIndex++'
        else:
            windex = '%d' % self.dest_reg_idx

        wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
             (windex, self.base_name)
        wb += 'if (traceData) { traceData->setData(%s); }' % \
              self.base_name

        return wb

class MemOperand(Operand):
    def isMem(self):
        return 1

    def makeConstructor(self, predRead, predWrite):
        return ''

    def makeDecl(self):
        # Declare memory data variable.
        return '%s %s;\n' % (self.ctype, self.base_name)

    def makeRead(self, predRead):
        if self.read_code != None:
            return self.buildReadCode()
        return ''

    def makeWrite(self, predWrite):
        if self.write_code != None:
            return self.buildWriteCode()
        return ''

class PCStateOperand(Operand):
    def makeConstructor(self, predRead, predWrite):
        return ''

    def makeRead(self, predRead):
        if self.reg_spec:
            # A component of the PC state.
            return '%s = __parserAutoPCState.%s();\n' % \
                (self.base_name, self.reg_spec)
        else:
            # The whole PC state itself.
            return '%s = xc->pcState();\n' % self.base_name

    def makeWrite(self, predWrite):
        if self.reg_spec:
            # A component of the PC state.
            return '__parserAutoPCState.%s(%s);\n' % \
                (self.reg_spec, self.base_name)
        else:
            # The whole PC state itself.
            return 'xc->pcState(%s);\n' % self.base_name

    def makeDecl(self):
        ctype = 'TheISA::PCState'
        if self.isPCPart():
            ctype = self.ctype
        # Note that initializations in the declarations are solely
        # to avoid 'uninitialized variable' errors from the compiler.
        return '%s %s = 0;\n' % (ctype, self.base_name)

    def isPCState(self):
        return 1

class OperandList(object):
    '''Find all the operands in the given code block.  Returns an operand
    descriptor list (instance of class OperandList).'''
    def __init__(self, parser, code):
        self.items = []
        self.bases = {}
        # delete strings and comments so we don't match on operands inside
        for regEx in (stringRE, commentRE):
            code = regEx.sub('', code)
        # search for operands
        next_pos = 0
        while 1:
            match = parser.operandsRE.search(code, next_pos)
            if not match:
                # no more matches: we're done
                break
            op = match.groups()
            # regexp groups are operand full name, base, and extension
            (op_full, op_base, op_ext) = op
            # If it is an elem operand, define or update the corresponding
            # vector operand
            isElem = False
            if op_base in parser.elemToVector:
                isElem = True
                elem_op = (op_base, op_ext)
                op_base = parser.elemToVector[op_base]
                op_ext = '' # use the default one
            # if the token following the operand is an assignment, this is
            # a destination (LHS), else it's a source (RHS)
            is_dest = (assignRE.match(code, match.end()) != None)
            is_src = not is_dest

            # see if we've already seen this one
            op_desc = self.find_base(op_base)
            if op_desc:
                if op_ext and op_ext != '' and op_desc.ext != op_ext:
                    error ('Inconsistent extensions for operand %s: %s - %s' \
                            % (op_base, op_desc.ext, op_ext))
                op_desc.is_src = op_desc.is_src or is_src
                op_desc.is_dest = op_desc.is_dest or is_dest
                if isElem:
                    (elem_base, elem_ext) = elem_op
                    found = False
                    for ae in op_desc.active_elems:
                        (ae_base, ae_ext) = ae
                        if ae_base == elem_base:
                            if ae_ext != elem_ext:
                                error('Inconsistent extensions for elem'
                                      ' operand %s' % elem_base)
                            else:
                                found = True
                    if not found:
                        op_desc.active_elems.append(elem_op)
            else:
                # new operand: create new descriptor
                op_desc = parser.operandNameMap[op_base](parser,
                    op_full, op_ext, is_src, is_dest)
                # if operand is a vector elem, add the corresponding vector
                # operand if not already done
                if isElem:
                    op_desc.elemExt = elem_op[1]
                    op_desc.active_elems = [elem_op]
                self.append(op_desc)
            # start next search after end of current match
            next_pos = match.end()
        self.sort()
        # enumerate source & dest register operands... used in building
        # constructor later
        self.numSrcRegs = 0
        self.numDestRegs = 0
        self.numFPDestRegs = 0
        self.numIntDestRegs = 0
        self.numVecDestRegs = 0
        self.numCCDestRegs = 0
        self.numMiscDestRegs = 0
        self.memOperand = None

        # Flags to keep track if one or more operands are to be read/written
        # conditionally.
        self.predRead = False
        self.predWrite = False

        for op_desc in self.items:
            if op_desc.isReg():
                if op_desc.is_src:
                    op_desc.src_reg_idx = self.numSrcRegs
                    self.numSrcRegs += 1
                if op_desc.is_dest:
                    op_desc.dest_reg_idx = self.numDestRegs
                    self.numDestRegs += 1
                    if op_desc.isFloatReg():
                        self.numFPDestRegs += 1
                    elif op_desc.isIntReg():
                        self.numIntDestRegs += 1
                    elif op_desc.isVecReg():
                        self.numVecDestRegs += 1
                    elif op_desc.isCCReg():
                        self.numCCDestRegs += 1
                    elif op_desc.isControlReg():
                        self.numMiscDestRegs += 1
            elif op_desc.isMem():
                if self.memOperand:
                    error("Code block has more than one memory operand.")
                self.memOperand = op_desc

            # Check if this operand has read/write predication. If true, then
            # the microop will dynamically index source/dest registers.
            self.predRead = self.predRead or op_desc.hasReadPred()
            self.predWrite = self.predWrite or op_desc.hasWritePred()

        if parser.maxInstSrcRegs < self.numSrcRegs:
            parser.maxInstSrcRegs = self.numSrcRegs
        if parser.maxInstDestRegs < self.numDestRegs:
            parser.maxInstDestRegs = self.numDestRegs
        if parser.maxMiscDestRegs < self.numMiscDestRegs:
            parser.maxMiscDestRegs = self.numMiscDestRegs

        # now make a final pass to finalize op_desc fields that may depend
        # on the register enumeration
        for op_desc in self.items:
            op_desc.finalize(self.predRead, self.predWrite)

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items[index]

    def append(self, op_desc):
        self.items.append(op_desc)
        self.bases[op_desc.base_name] = op_desc

    def find_base(self, base_name):
        # like self.bases[base_name], but returns None if not found
        # (rather than raising exception)
        return self.bases.get(base_name)

    # internal helper function for concat[Some]Attr{Strings|Lists}
    def __internalConcatAttrs(self, attr_name, filter, result):
        for op_desc in self.items:
            if filter(op_desc):
                result += getattr(op_desc, attr_name)
        return result

    # return a single string that is the concatenation of the (string)
    # values of the specified attribute for all operands
    def concatAttrStrings(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, '')

    # like concatAttrStrings, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrStrings(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, '')

    # return a single list that is the concatenation of the (list)
    # values of the specified attribute for all operands
    def concatAttrLists(self, attr_name):
        return self.__internalConcatAttrs(attr_name, lambda x: 1, [])

    # like concatAttrLists, but only include the values for the operands
    # for which the provided filter function returns true
    def concatSomeAttrLists(self, filter, attr_name):
        return self.__internalConcatAttrs(attr_name, filter, [])

    def sort(self):
        self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)

class SubOperandList(OperandList):
    '''Find all the operands in the given code block.  Returns an operand
    descriptor list (instance of class OperandList).'''
    def __init__(self, parser, code, master_list):
        self.items = []
        self.bases = {}
        # delete strings and comments so we don't match on operands inside
        for regEx in (stringRE, commentRE):
            code = regEx.sub('', code)
        # search for operands
        next_pos = 0
        while 1:
            match = parser.operandsRE.search(code, next_pos)
            if not match:
                # no more matches: we're done
                break
            op = match.groups()
            # regexp groups are operand full name, base, and extension
            (op_full, op_base, op_ext) = op
            # If it is an elem operand, define or update the corresponding
            # vector operand
            if op_base in parser.elemToVector:
                elem_op = op_base
                op_base = parser.elemToVector[elem_op]
            # find this op in the master list
            op_desc = master_list.find_base(op_base)
            if not op_desc:
                error('Found operand %s which is not in the master list!'
                      % op_base)
            else:
                # See if we've already found this operand
                op_desc = self.find_base(op_base)
                if not op_desc:
                    # if not, add a reference to it to this sub list
                    self.append(master_list.bases[op_base])

            # start next search after end of current match
            next_pos = match.end()
        self.sort()
        self.memOperand = None
        # Whether the whole PC needs to be read so parts of it can be accessed
        self.readPC = False
        # Whether the whole PC needs to be written after parts of it were
        # changed
        self.setPC = False
        # Whether this instruction manipulates the whole PC or parts of it.
        # Mixing the two is a bad idea and flagged as an error.
        self.pcPart = None

        # Flags to keep track if one or more operands are to be read/written
        # conditionally.
        self.predRead = False
        self.predWrite = False

        for op_desc in self.items:
            if op_desc.isPCPart():
                self.readPC = True
                if op_desc.is_dest:
                    self.setPC = True

            if op_desc.isPCState():
                if self.pcPart is not None:
                    if self.pcPart and not op_desc.isPCPart() or \
                            not self.pcPart and op_desc.isPCPart():
                        error("Mixed whole and partial PC state operands.")
                self.pcPart = op_desc.isPCPart()

            if op_desc.isMem():
                if self.memOperand:
                    error("Code block has more than one memory operand.")
                self.memOperand = op_desc

            # Check if this operand has read/write predication. If true, then
            # the microop will dynamically index source/dest registers.
            self.predRead = self.predRead or op_desc.hasReadPred()
            self.predWrite = self.predWrite or op_desc.hasWritePred()

# Regular expression object to match C++ strings
stringRE = re.compile(r'"([^"\\]|\\.)*"')

# Regular expression object to match C++ comments
# (used in findOperands())
commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
        re.DOTALL | re.MULTILINE)

# Regular expression object to match assignment statements (used in
# findOperands()).  If the code immediately following the first
# appearance of the operand matches this regex, then the operand
# appears to be on the LHS of an assignment, and is thus a
# destination.  basically we're looking for an '=' that's not '=='.
# The heinous tangle before that handles the case where the operand
# has an array subscript.
assignRE = re.compile(r'(\[[^\]]+\])?\s*=(?!=)', re.MULTILINE)

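# For illustration (a sketch with hypothetical operand names), showing
# how these are used when scanning code for operands:
#   >>> commentRE.sub('', 'Rd = Rs1; // also mentions Rs2')
#   'Rd = Rs1;'
#   >>> bool(assignRE.match('Rd = Rs1 + Rs2;', 2))   # position just after 'Rd'
#   True
#   >>> bool(assignRE.match('Rd == Rs1;', 2))        # '==' is a comparison, not a dest
#   False
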
def makeFlagConstructor(flag_list):
    if len(flag_list) == 0:
        return ''
    # filter out repeated flags
    flag_list.sort()
    i = 1
    while i < len(flag_list):
        if flag_list[i] == flag_list[i-1]:
            del flag_list[i]
        else:
            i += 1
    pre = '\n\tflags['
    post = '] = true;'
    code = pre + string.join(flag_list, post + pre) + post
    return code

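# Example (illustrative; note the list is sorted and de-duplicated in place):
#   >>> makeFlagConstructor(['IsInteger', 'IsLoad', 'IsInteger'])
#   '\n\tflags[IsInteger] = true;\n\tflags[IsLoad] = true;'
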
# Assume all instruction flags are of the form 'IsFoo'
instFlagRE = re.compile(r'Is.*')

# OpClass constants end in 'Op' except No_OpClass
opClassRE = re.compile(r'.*Op|No_OpClass')

class InstObjParams(object):
    def __init__(self, parser, mnem, class_name, base_class = '',
                 snippets = {}, opt_args = []):
        self.mnemonic = mnem
        self.class_name = class_name
        self.base_class = base_class
        if not isinstance(snippets, dict):
            snippets = {'code' : snippets}
        compositeCode = ' '.join(map(str, snippets.values()))
        self.snippets = snippets

        self.operands = OperandList(parser, compositeCode)

        # The header of the constructor declares the variables to be used
        # in the body of the constructor.
        header = ''
        header += '\n\t_numSrcRegs = 0;'
        header += '\n\t_numDestRegs = 0;'
        header += '\n\t_numFPDestRegs = 0;'
        header += '\n\t_numVecDestRegs = 0;'
        header += '\n\t_numVecElemDestRegs = 0;'
        header += '\n\t_numIntDestRegs = 0;'
        header += '\n\t_numCCDestRegs = 0;'

        self.constructor = header + \
                           self.operands.concatAttrStrings('constructor')

        self.flags = self.operands.concatAttrLists('flags')

        self.op_class = None

        # Optional arguments are assumed to be either StaticInst flags
        # or an OpClass value.  To avoid having to import a complete
        # list of these values to match against, we do it ad-hoc
        # with regexps.
        for oa in opt_args:
            if instFlagRE.match(oa):
                self.flags.append(oa)
            elif opClassRE.match(oa):
                self.op_class = oa
            else:
                error('InstObjParams: optional arg "%s" not recognized '
                      'as StaticInst::Flag or OpClass.' % oa)

        # Make a basic guess on the operand class if not set.
        # These are good enough for most cases.
        if not self.op_class:
            if 'IsStore' in self.flags:
                # The order matters here: 'IsFloating' and 'IsInteger' are
                # usually set in FP instructions because of the base
                # register
                if 'IsFloating' in self.flags:
                    self.op_class = 'FloatMemWriteOp'
                else:
                    self.op_class = 'MemWriteOp'
            elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
                # The order matters here: 'IsFloating' and 'IsInteger' are
                # usually set in FP instructions because of the base
                # register
                if 'IsFloating' in self.flags:
                    self.op_class = 'FloatMemReadOp'
                else:
                    self.op_class = 'MemReadOp'
            elif 'IsFloating' in self.flags:
                self.op_class = 'FloatAddOp'
            elif 'IsVector' in self.flags:
                self.op_class = 'SimdAddOp'
            else:
                self.op_class = 'IntAluOp'

1386        # any flags added via opt_args
1387        self.constructor += makeFlagConstructor(self.flags)
1388
1389        # if 'IsFloating' is set, add call to the FP enable check
1390        # function (which should be provided by isa_desc via a declare)
1391        # if 'IsVector' is set, add call to the Vector enable check
1392        # function (which should be provided by isa_desc via a declare)
1393        if 'IsFloating' in self.flags:
1394            self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
1395        elif 'IsVector' in self.flags:
1396            self.fp_enable_check = 'fault = checkVecEnableFault(xc);'
1397        else:
1398            self.fp_enable_check = ''
1399
1400##############
1401# Stack: a simple stack object.  Used for both formats (formatStack)
1402# and default cases (defaultStack).  Simply wraps a list to give more
1403# stack-like syntax and enable initialization with an argument list
1404# (as opposed to an argument that's a list).
1405
1406class Stack(list):
1407    def __init__(self, *items):
1408        list.__init__(self, items)
1409
1410    def push(self, item):
1411        self.append(item);
1412
1413    def top(self):
1414        return self[-1]
1415
1416# Format a file include stack backtrace as a string
1417def backtrace(filename_stack):
1418    fmt = "In file included from %s:"
1419    return "\n".join([fmt % f for f in filename_stack])
1420
1421
1422#######################
1423#
1424# LineTracker: track filenames along with line numbers in PLY lineno fields
1425#     PLY explicitly doesn't do anything with 'lineno' except propagate
1426#     it.  This class lets us tie filenames with the line numbers with a
1427#     minimum of disruption to existing increment code.
1428#
1429
1430class LineTracker(object):
1431    def __init__(self, filename, lineno=1):
1432        self.filename = filename
1433        self.lineno = lineno
1434
1435    # Overload '+=' for increments.  We need to create a new object on
1436    # each update else every token ends up referencing the same
1437    # constantly incrementing instance.
1438    def __iadd__(self, incr):
1439        return LineTracker(self.filename, self.lineno + incr)
1440
1441    def __str__(self):
1442        return "%s:%d" % (self.filename, self.lineno)
1443
1444    # In case there are places where someone really expects a number
1445    def __int__(self):
1446        return self.lineno
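    # Illustrative behaviour (the filename is hypothetical):
    #   lt = LineTracker("main.isa")
    #   lt += 3                # returns a new object; the original is untouched
    #   str(lt)                # -> "main.isa:4"
    #   int(lt)                # -> 4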
1447
1448
1449#######################
1450#
1451# ISA Parser
1452#   parses ISA DSL and emits C++ headers and source
1453#
1454
1455class ISAParser(Grammar):
1456    def __init__(self, output_dir):
1457        super(ISAParser, self).__init__()
1458        self.output_dir = output_dir
1459
1460        self.filename = None # for output file watermarking/scaremongering
1461
1462        # variable to hold templates
1463        self.templateMap = {}
1464
1465        # This dictionary maps format name strings to Format objects.
1466        self.formatMap = {}
1467
1468        # Track open files and, if applicable, how many chunks each has
1469        # been split into so far.
1470        self.files = {}
1471        self.splits = {}
1472
1473        # isa_name / namespace identifier from namespace declaration.
1474        # before the namespace declaration, None.
1475        self.isa_name = None
1476        self.namespace = None
1477
1478        # The format stack.
1479        self.formatStack = Stack(NoFormat())
1480
1481        # The default case stack.
1482        self.defaultStack = Stack(None)
1483
1484        # Stack that tracks current file and line number.  Each
1485        # element is a LineTracker object that records the
1486        # *current* filename and the line number in the *previous*
1487        # file where it was included.
1488        self.fileNameStack = Stack()
1489
1490        symbols = ('makeList', 're', 'string')
1491        self.exportContext = dict([(s, eval(s)) for s in symbols])
1492
1493        self.maxInstSrcRegs = 0
1494        self.maxInstDestRegs = 0
1495        self.maxMiscDestRegs = 0
1496
1497    def __getitem__(self, i):    # Allow object (self) to be
1498        return getattr(self, i)  # passed to %-substitutions
1499
1500    # Change the file suffix of a base filename:
1501    #   (e.g.) decoder.cc -> decoder-g.cc.inc for 'global' outputs
1502    def suffixize(self, s, sec):
1503        extn = re.compile('(\.[^\.]+)$') # isolate extension
1504        if self.namespace:
1505            return extn.sub(r'-ns\1.inc', s) # insert some text on either side
1506        else:
1507            return extn.sub(r'-g\1.inc', s)
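    # For illustration: before the namespace declaration,
    # suffixize('decoder.cc', sec) yields 'decoder-g.cc.inc'; once
    # self.namespace is set it yields 'decoder-ns.cc.inc'.  Note that the
    # 'sec' argument does not currently affect the substitution.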
1508
1509    # Get the file object for emitting code into the specified section
1510    # (header, decoder, exec, decode_block).
1511    def get_file(self, section):
1512        if section == 'decode_block':
1513            filename = 'decode-method.cc.inc'
1514        else:
1515            if section == 'header':
1516                file = 'decoder.hh'
1517            else:
1518                file = '%s.cc' % section
1519            filename = self.suffixize(file, section)
1520        try:
1521            return self.files[filename]
1522        except KeyError: pass
1523
1524        f = self.open(filename)
1525        self.files[filename] = f
1526
1527        # The splittable files are the ones with many independent
1528        # per-instruction functions - the decoder's instruction constructors
1529        # and the instruction execution (execute()) methods. These both have
1530        # the suffix -ns.cc.inc, meaning they are within the namespace part
1531        # of the ISA, contain object-emitting C++ source, and are included
1532        # into other top-level files. These are the files that need special
1533        # #define's to allow parts of them to be compiled separately. Rather
1534        # than splitting the emissions into separate files, the monolithic
1535        # output of the ISA parser is maintained, but the value (or lack
1536        # thereof) of the __SPLIT definition during C preprocessing will
1537        # select the different chunks. If no 'split' directives are used,
1538        # the cpp emissions have no effect.
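        # As a sketch (assuming a single 'split' directive was seen), an
        # emitted -ns.cc.inc file is bracketed for the C preprocessor
        # roughly like this:
        #   #if !defined(__SPLIT) || (__SPLIT == 1)
        #   ... chunk 1 ...
        #   #endif
        #   #if __SPLIT == 2
        #   ... chunk 2 ...
        #   #endif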
1539        if re.search('-ns.cc.inc$', filename):
1540            print >>f, '#if !defined(__SPLIT) || (__SPLIT == 1)'
1541            self.splits[f] = 1
1542        # ensure requisite #include's
1543        elif filename == 'decoder-g.hh.inc':
1544            print >>f, '#include "base/bitfield.hh"'
1545
1546        return f
1547
1548    # Weave together the parts of the different output sections by
1549    # #include'ing them into some very short top-level .cc/.hh files.
1550    # These small files make it much clearer how this tool works, since
1551    # you directly see the chunks emitted as files that are #include'd.
1552    def write_top_level_files(self):
1553        # decoder header - everything depends on this
1554        file = 'decoder.hh'
1555        with self.open(file) as f:
1556            fn = 'decoder-g.hh.inc'
1557            assert(fn in self.files)
1558            f.write('#include "%s"\n' % fn)
1559
1560            fn = 'decoder-ns.hh.inc'
1561            assert(fn in self.files)
1562            f.write('namespace %s {\n#include "%s"\n}\n'
1563                    % (self.namespace, fn))
1564
1565        # decoder method - cannot be split
1566        file = 'decoder.cc'
1567        with self.open(file) as f:
1568            fn = 'decoder-g.cc.inc'
1569            assert(fn in self.files)
1570            f.write('#include "%s"\n' % fn)
1571
1572            fn = 'decoder.hh'
1573            f.write('#include "%s"\n' % fn)
1574
1575            fn = 'decode-method.cc.inc'
1576            # guaranteed to have been written by the time the parse completes
1577            f.write('#include "%s"\n' % fn)
1578
1579        extn = re.compile('(\.[^\.]+)$')
1580
1581        # instruction constructors
1582        splits = self.splits[self.get_file('decoder')]
1583        file_ = 'inst-constrs.cc'
1584        for i in range(1, splits+1):
1585            if splits > 1:
1586                file = extn.sub(r'-%d\1' % i, file_)
1587            else:
1588                file = file_
1589            with self.open(file) as f:
1590                fn = 'decoder-g.cc.inc'
1591                assert(fn in self.files)
1592                f.write('#include "%s"\n' % fn)
1593
1594                fn = 'decoder.hh'
1595                f.write('#include "%s"\n' % fn)
1596
1597                fn = 'decoder-ns.cc.inc'
1598                assert(fn in self.files)
1599                print >>f, 'namespace %s {' % self.namespace
1600                if splits > 1:
1601                    print >>f, '#define __SPLIT %u' % i
1602                print >>f, '#include "%s"' % fn
1603                print >>f, '}'
1604
1605        # instruction execution
1606        splits = self.splits[self.get_file('exec')]
1607        for i in range(1, splits+1):
1608            file = 'generic_cpu_exec.cc'
1609            if splits > 1:
1610                file = extn.sub(r'_%d\1' % i, file)
1611            with self.open(file) as f:
1612                fn = 'exec-g.cc.inc'
1613                assert(fn in self.files)
1614                f.write('#include "%s"\n' % fn)
1615                f.write('#include "cpu/exec_context.hh"\n')
1616                f.write('#include "decoder.hh"\n')
1617
1618                fn = 'exec-ns.cc.inc'
1619                assert(fn in self.files)
1620                print >>f, 'namespace %s {' % self.namespace
1621                if splits > 1:
1622                    print >>f, '#define __SPLIT %u' % i
1623                print >>f, '#include "%s"' % fn
1624                print >>f, '}'
1625
1626        # max_inst_regs.hh
1627        self.update('max_inst_regs.hh',
1628                    '''namespace %(namespace)s {
1629    const int MaxInstSrcRegs = %(maxInstSrcRegs)d;
1630    const int MaxInstDestRegs = %(maxInstDestRegs)d;
1631    const int MaxMiscDestRegs = %(maxMiscDestRegs)d;\n}\n''' % self)
1632
1633    scaremonger_template = '''// DO NOT EDIT
1634// This file was automatically generated from an ISA description:
1635//   %(filename)s
1636
1637'''
1638
1639    #####################################################################
1640    #
1641    #                                Lexer
1642    #
1643    # The PLY lexer module takes two things as input:
1644    # - A list of token names (the string list 'tokens')
1645    # - A regular expression describing a match for each token.  The
1646    #   regexp for token FOO can be provided in two ways:
1647    #   - as a string variable named t_FOO
1648    #   - as the doc string for a function named t_FOO.  In this case,
1649    #     the function is also executed, allowing an action to be
1650    #     associated with each token match.
1651    #
1652    #####################################################################
1653
1654    # Reserved words.  These are listed separately as they are matched
1655    # using the same regexp as generic IDs, but distinguished in the
1656    # t_ID() function.  The PLY documentation suggests this approach.
1657    reserved = (
1658        'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
1659        'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
1660        'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE'
1661        )
1662
1663    # List of tokens.  The lex module requires this.
1664    tokens = reserved + (
1665        # identifier
1666        'ID',
1667
1668        # integer literal
1669        'INTLIT',
1670
1671        # string literal
1672        'STRLIT',
1673
1674        # code literal
1675        'CODELIT',
1676
1677        # ( ) [ ] { } < > = , ; . : :: *
1678        'LPAREN', 'RPAREN',
1679        'LBRACKET', 'RBRACKET',
1680        'LBRACE', 'RBRACE',
1681        'LESS', 'GREATER', 'EQUALS',
1682        'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
1683        'ASTERISK',
1684
1685        # C preprocessor directives
1686        'CPPDIRECTIVE'
1687
1688    # The following are matched but never returned; they are commented
1689    # out to suppress a PLY warning.
1690        # newfile directive
1691    #    'NEWFILE',
1692
1693        # endfile directive
1694    #    'ENDFILE'
1695    )
1696
1697    # Regular expressions for token matching
1698    t_LPAREN           = r'\('
1699    t_RPAREN           = r'\)'
1700    t_LBRACKET         = r'\['
1701    t_RBRACKET         = r'\]'
1702    t_LBRACE           = r'\{'
1703    t_RBRACE           = r'\}'
1704    t_LESS             = r'\<'
1705    t_GREATER          = r'\>'
1706    t_EQUALS           = r'='
1707    t_COMMA            = r','
1708    t_SEMI             = r';'
1709    t_DOT              = r'\.'
1710    t_COLON            = r':'
1711    t_DBLCOLON         = r'::'
1712    t_ASTERISK         = r'\*'
1713
1714    # Identifiers and reserved words
1715    reserved_map = { }
1716    for r in reserved:
1717        reserved_map[r.lower()] = r
1718
1719    def t_ID(self, t):
1720        r'[A-Za-z_]\w*'
1721        t.type = self.reserved_map.get(t.value, 'ID')
1722        return t
1723
1724    # Integer literal
1725    def t_INTLIT(self, t):
1726        r'-?(0x[\da-fA-F]+)|\d+'
1727        r'-?(0x[\da-fA-F]+|\d+)'
1728            t.value = int(t.value,0)
1729        except ValueError:
1730            error(t.lexer.lineno, 'Integer value "%s" too large' % t.value)
1731            t.value = 0
1732        return t
1733
1734    # String literal.  Note that these use only single quotes, and
1735    # can span multiple lines.
1736    def t_STRLIT(self, t):
1737        r"(?m)'([^'])+'"
1738        # strip off quotes
1739        t.value = t.value[1:-1]
1740        t.lexer.lineno += t.value.count('\n')
1741        return t
1742
1743
1744    # "Code literal"... like a string literal, but delimiters are
1745    # '{{' and '}}' so they get formatted nicely under emacs c-mode
1746    def t_CODELIT(self, t):
1747        r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
1748        # strip off {{ & }}
1749        t.value = t.value[2:-2]
1750        t.lexer.lineno += t.value.count('\n')
1751        return t
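    # Illustrative tokens (the DSL snippets are made up for this example):
    #   'IsLoad, IsMemRef'          -> STRLIT with value "IsLoad, IsMemRef"
    #   {{ Rd = Rs1 + Rs2; }}       -> CODELIT with value " Rd = Rs1 + Rs2; "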
1752
1753    def t_CPPDIRECTIVE(self, t):
1754        r'^\#[^\#].*\n'
1755        t.lexer.lineno += t.value.count('\n')
1756        return t
1757
1758    def t_NEWFILE(self, t):
1759        r'^\#\#newfile\s+"[^"]*"\n'
1760        self.fileNameStack.push(t.lexer.lineno)
1761        t.lexer.lineno = LineTracker(t.value[11:-2])
1762
1763    def t_ENDFILE(self, t):
1764        r'^\#\#endfile\n'
1765        t.lexer.lineno = self.fileNameStack.pop()
1766
1767    #
1768    # The functions t_NEWLINE, t_ignore, and t_error are
1769    # special for the lex module.
1770    #
1771
1772    # Newlines
1773    def t_NEWLINE(self, t):
1774        r'\n+'
1775        t.lexer.lineno += t.value.count('\n')
1776
1777    # Comments
1778    def t_comment(self, t):
1779        r'//.*'
1780
1781    # Completely ignored characters
1782    t_ignore = ' \t\x0c'
1783
1784    # Error handler
1785    def t_error(self, t):
1786        error(t.lexer.lineno, "illegal character '%s'" % t.value[0])
1787        t.skip(1)
1788
1789    #####################################################################
1790    #
1791    #                                Parser
1792    #
1793    # Every function whose name starts with 'p_' defines a grammar
1794    # rule.  The rule is encoded in the function's doc string, while
1795    # the function body provides the action taken when the rule is
1796    # matched.  The argument to each function is a list of the values
1797    # of the rule's symbols: t[0] for the LHS, and t[1..n] for the
1798    # symbols on the RHS.  For tokens, the value is copied from the
1799    # t.value attribute provided by the lexer.  For non-terminals, the
1800    # value is assigned by the producing rule; i.e., the job of the
1801    # grammar rule function is to set the value for the non-terminal
1802    # on the LHS (by assigning to t[0]).
1803    #####################################################################
1804
1805    # The LHS of the first grammar rule is used as the start symbol
1806    # (in this case, 'specification').  Note that this rule enforces
1807    # that there will be exactly one namespace declaration, with 0 or
1808    # more global defs/decls before and after it.  The defs & decls
1809    # before the namespace decl will be outside the namespace; those
1810    # after will be inside.  The decoder function is always inside the
1811    # namespace.
1812    def p_specification(self, t):
1813        'specification : opt_defs_and_outputs top_level_decode_block'
1814
1815        for f in self.splits.iterkeys():
1816            f.write('\n#endif\n')
1817
1818        for f in self.files.itervalues(): # close ALL the files;
1819            f.close() # not doing so can cause compilation to fail
1820
1821        self.write_top_level_files()
1822
1823        t[0] = True
1824
1825    # 'opt_defs_and_outputs' is a possibly empty sequence of def and/or
1826    # output statements. Its productions do the hard work of eventually
1827    # instantiating GenCode objects, which are generally emitted (written
1828    # to disk) as soon as possible, except for the decode_block, which has
1829    # to be accumulated into one large function of nested switch/case blocks.
1830    def p_opt_defs_and_outputs_0(self, t):
1831        'opt_defs_and_outputs : empty'
1832
1833    def p_opt_defs_and_outputs_1(self, t):
1834        'opt_defs_and_outputs : defs_and_outputs'
1835
1836    def p_defs_and_outputs_0(self, t):
1837        'defs_and_outputs : def_or_output'
1838
1839    def p_defs_and_outputs_1(self, t):
1840        'defs_and_outputs : defs_and_outputs def_or_output'
1841
1842    # The list of possible definition/output statements.
1843    # They are all processed as they are seen.
1844    def p_def_or_output(self, t):
1845        '''def_or_output : name_decl
1846                         | def_format
1847                         | def_bitfield
1848                         | def_bitfield_struct
1849                         | def_template
1850                         | def_operand_types
1851                         | def_operands
1852                         | output
1853                         | global_let
1854                         | split'''
1855
1856    # Utility function used by both invocations of splitting - explicit
1857    # 'split' keyword and split() function inside "let {{ }};" blocks.
1858    def split(self, sec, write=False):
1859        assert sec != 'header', "header cannot be split"
1860
1861        f = self.get_file(sec)
1862        self.splits[f] += 1
1863        s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f]
1864        if write:
1865            f.write(s)
1866        else:
1867            return s
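    # For example, since each splittable file starts with its count at 1,
    # the first call to split('exec') bumps the count to 2 and produces
    # (or writes) the string '\n#endif\n#if __SPLIT == 2\n'.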
1868
1869    # split output file to reduce compilation time
1870    def p_split(self, t):
1871        'split : SPLIT output_type SEMI'
1872        assert self.isa_name, "'split' not allowed before namespace decl"
1873
1874        self.split(t[2], True)
1875
1876    def p_output_type(self, t):
1877        '''output_type : DECODER
1878                       | HEADER
1879                       | EXEC'''
1880        t[0] = t[1]
1881
1882    # ISA name declaration looks like "namespace <foo>;"
1883    def p_name_decl(self, t):
1884        'name_decl : NAMESPACE ID SEMI'
1885        assert self.isa_name is None, "Only 1 namespace decl permitted"
1886        self.isa_name = t[2]
1887        self.namespace = t[2] + 'Inst'
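    # For illustration (the ISA name is hypothetical): 'namespace Foo;'
    # sets self.isa_name to 'Foo' and self.namespace to 'FooInst'.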
1888
1889    # Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
1890    # directly to the appropriate output section.
1891
1892    # Massage output block by substituting in template definitions and
1893    # bit operators.  We handle '%'s embedded in the string that don't
1894    # indicate template substitutions by doubling them first so that the
1895    # format operation will reduce them back to single '%'s.
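    # For example (illustrative string, hypothetical template name):
    # protectNonSubstPercents() turns "50% faster: %(SomeTemplate)s" into
    # "50%% faster: %(SomeTemplate)s", so only the %(...)s reference is
    # consumed by the templateMap substitution below.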
1896    def process_output(self, s):
1897        s = self.protectNonSubstPercents(s)
1898        return substBitOps(s % self.templateMap)
1899
1900    def p_output(self, t):
1901        'output : OUTPUT output_type CODELIT SEMI'
1902        kwargs = { t[2]+'_output' : self.process_output(t[3]) }
1903        GenCode(self, **kwargs).emit()
1904
1905    # global let blocks 'let {{...}}' (Python code blocks) are
1906    # executed directly when seen.  Note that these execute in a
1907    # special variable context 'exportContext' to prevent the code
1908    # from polluting this script's namespace.
1909    def p_global_let(self, t):
1910        'global_let : LET CODELIT SEMI'
1911        def _split(sec):
1912            return self.split(sec)
1913        self.updateExportContext()
1914        self.exportContext["header_output"] = ''
1915        self.exportContext["decoder_output"] = ''
1916        self.exportContext["exec_output"] = ''
1917        self.exportContext["decode_block"] = ''
1918        self.exportContext["split"] = _split
1919        split_setup = '''
1920def wrap(func):
1921    def split(sec):
1922        globals()[sec + '_output'] += func(sec)
1923    return split
1924split = wrap(split)
1925del wrap
1926'''
1927        # This tricky setup (immediately above) allows us to just write
1928        # (e.g.) "split('exec')" in the Python code and the split #ifdef's
1929        # will automatically be added to the exec_output variable. The inner
1930        # Python execution environment doesn't know about the split points,
1931        # so we carefully inject and wrap a closure that can retrieve the
1932        # next split's #define from the parser and add it to the current
1933        # emission-in-progress.
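        # A minimal sketch of what an ISA author might write in a let
        # block (the generated C++ strings here are placeholders):
        #   let {{
        #       exec_output = '// chunk 1 of generated code'
        #       split('exec')
        #       exec_output += '// chunk 2 of generated code'
        #   }};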
1934        try:
1935            exec split_setup+fixPythonIndentation(t[2]) in self.exportContext
1936        except Exception, exc:
1937            if debug:
1938                raise
1939            error(t.lineno(1), 'In global let block: %s' % exc)
1940        GenCode(self,
1941                header_output=self.exportContext["header_output"],
1942                decoder_output=self.exportContext["decoder_output"],
1943                exec_output=self.exportContext["exec_output"],
1944                decode_block=self.exportContext["decode_block"]).emit()
1945
1946    # Define the mapping from operand type extensions to C++ types and
1947    # bit widths (stored in operandTypeMap).
1948    def p_def_operand_types(self, t):
1949        'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
1950        try:
1951            self.operandTypeMap = eval('{' + t[3] + '}')
1952        except Exception, exc:
1953            if debug:
1954                raise
1955            error(t.lineno(1),
1956                  'In def operand_types: %s' % exc)
1957
1958    # Define the mapping from operand names to operand classes and
1959    # other traits.  Stored in operandNameMap.
1960    def p_def_operands(self, t):
1961        'def_operands : DEF OPERANDS CODELIT SEMI'
1962        if not hasattr(self, 'operandTypeMap'):
1963            error(t.lineno(1),
1964                  'error: operand types must be defined before operands')
1965        try:
1966            user_dict = eval('{' + t[3] + '}', self.exportContext)
1967        except Exception, exc:
1968            if debug:
1969                raise
1970            error(t.lineno(1), 'In def operands: %s' % exc)
1971        self.buildOperandNameMap(user_dict, t.lexer.lineno)
1972
1973    # A bitfield definition looks like:
1974    # 'def [signed] bitfield <ID> [<first>:<last>]'
1975    # This generates a preprocessor macro in the output file.
1976    def p_def_bitfield_0(self, t):
1977        'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
1978        expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
1979        if (t[2] == 'signed'):
1980            expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
1981        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1982        GenCode(self, header_output=hash_define).emit()
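    # For illustration (the field name and bit range are made up):
    #   def bitfield OPCODE <31:26>;
    # emits a header line equivalent to
    #   #define OPCODE bits(machInst, 31, 26)
    # (preceded by a matching #undef).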
1983
1984    # alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
1985    def p_def_bitfield_1(self, t):
1986        'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
1987        expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
1988        if (t[2] == 'signed'):
1989            expr = 'sext<%d>(%s)' % (1, expr)
1990        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
1991        GenCode(self, header_output=hash_define).emit()
1992
1993    # alternate form for structure member: 'def bitfield <ID> <ID>'
1994    def p_def_bitfield_struct(self, t):
1995        'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
1996        if (t[2] != ''):
1997            error(t.lineno(1),
1998                  'error: structure bitfields are always unsigned.')
1999        expr = 'machInst.%s' % t[5]
2000        hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
2001        GenCode(self, header_output=hash_define).emit()
2002
2003    def p_id_with_dot_0(self, t):
2004        'id_with_dot : ID'
2005        t[0] = t[1]
2006
2007    def p_id_with_dot_1(self, t):
2008        'id_with_dot : ID DOT id_with_dot'
2009        t[0] = t[1] + t[2] + t[3]
2010
2011    def p_opt_signed_0(self, t):
2012        'opt_signed : SIGNED'
2013        t[0] = t[1]
2014
2015    def p_opt_signed_1(self, t):
2016        'opt_signed : empty'
2017        t[0] = ''
2018
2019    def p_def_template(self, t):
2020        'def_template : DEF TEMPLATE ID CODELIT SEMI'
2021        if t[3] in self.templateMap:
2022            print "warning: template %s already defined" % t[3]
2023        self.templateMap[t[3]] = Template(self, t[4])
2024
2025    # An instruction format definition looks like
2026    # "def format <fmt>(<params>) {{...}};"
2027    def p_def_format(self, t):
2028        'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
2029        (id, params, code) = (t[3], t[5], t[7])
2030        self.defFormat(id, params, code, t.lexer.lineno)
2031
2032    # The formal parameter list for an instruction format is a
2033    # possibly empty list of comma-separated parameters.  Positional
2034    # (standard, non-keyword) parameters must come first, followed by
2035    # keyword parameters, followed by a '*foo' parameter that gets
2036    # excess positional arguments (as in Python).  Each of these three
2037    # parameter categories is optional.
2038    #
2039    # Note that we do not support the '**foo' parameter for collecting
2040    # otherwise undefined keyword args.  Otherwise the parameter list
2041    # is (I believe) identical to what is supported in Python.
2042    #
2043    # The param list is built up as a flat list of strings: bare names for
2044    # positional params, 'name = default' strings for keyword params, and
2045    # '*name' for the excess-args param.
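    # For illustration (the format and parameter names are hypothetical),
    # the parameter list of
    #   def format LoadStore(code, mem_flags = [], *opt_args) {{...}};
    # becomes ['code', "mem_flags = []", '*opt_args'].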
2046    def p_param_list_0(self, t):
2047        'param_list : positional_param_list COMMA nonpositional_param_list'
2048        t[0] = t[1] + t[3]
2049
2050    def p_param_list_1(self, t):
2051        '''param_list : positional_param_list
2052                      | nonpositional_param_list'''
2053        t[0] = t[1]
2054
2055    def p_positional_param_list_0(self, t):
2056        'positional_param_list : empty'
2057        t[0] = []
2058
2059    def p_positional_param_list_1(self, t):
2060        'positional_param_list : ID'
2061        t[0] = [t[1]]
2062
2063    def p_positional_param_list_2(self, t):
2064        'positional_param_list : positional_param_list COMMA ID'
2065        t[0] = t[1] + [t[3]]
2066
2067    def p_nonpositional_param_list_0(self, t):
2068        'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
2069        t[0] = t[1] + t[3]
2070
2071    def p_nonpositional_param_list_1(self, t):
2072        '''nonpositional_param_list : keyword_param_list
2073                                    | excess_args_param'''
2074        t[0] = t[1]
2075
2076    def p_keyword_param_list_0(self, t):
2077        'keyword_param_list : keyword_param'
2078        t[0] = [t[1]]
2079
2080    def p_keyword_param_list_1(self, t):
2081        'keyword_param_list : keyword_param_list COMMA keyword_param'
2082        t[0] = t[1] + [t[3]]
2083
2084    def p_keyword_param(self, t):
2085        'keyword_param : ID EQUALS expr'
2086        t[0] = t[1] + ' = ' + repr(t[3])
2087
2088    def p_excess_args_param(self, t):
2089        'excess_args_param : ASTERISK ID'
2090        # Just concatenate them: '*ID'.  Wrap in list to be consistent
2091        # with positional_param_list and keyword_param_list.
2092        t[0] = [t[1] + t[2]]
2093
2094    # End of format definition-related rules.
2095    ##############
2096
2097    #
2098    # A decode block looks like:
2099    #       decode <field1> [, <field2>]* [default <inst>] { ... }
2100    #
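    # A sketch of a decode block (the field, format, and mnemonic names
    # are hypothetical):
    #   decode OPCODE default Unknown::unknown() {
    #       0x0: IntOp::add({{ Rd = Ra + Rb; }});
    #       0x1: decode FUNC { ... }
    #   }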
2101    def p_top_level_decode_block(self, t):
2102        'top_level_decode_block : decode_block'
2103        codeObj = t[1]
2104        codeObj.wrap_decode_block('''
2105StaticInstPtr
2106%(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst)
2107{
2108    using namespace %(namespace)s;
2109''' % self, '}')
2110
2111        codeObj.emit()
2112
2113    def p_decode_block(self, t):
2114        'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
2115        default_defaults = self.defaultStack.pop()
2116        codeObj = t[5]
2117        # use the "default defaults" only if there was no explicit
2118        # default statement in decode_stmt_list
2119        if not codeObj.has_decode_default:
2120            codeObj += default_defaults
2121        codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
2122        t[0] = codeObj
2123
2124    # The opt_default statement serves only to push the "default
2125    # defaults" onto defaultStack.  This value will be used by nested
2126    # decode blocks, and used and popped off when the current
2127    # decode_block is processed (in p_decode_block() above).
2128    def p_opt_default_0(self, t):
2129        'opt_default : empty'
2130        # no default specified: reuse the one currently at the top of
2131        # the stack
2132        self.defaultStack.push(self.defaultStack.top())
2133        # no meaningful value returned
2134        t[0] = None
2135
2136    def p_opt_default_1(self, t):
2137        'opt_default : DEFAULT inst'
2138        # push the new default
2139        codeObj = t[2]
2140        codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
2141        self.defaultStack.push(codeObj)
2142        # no meaningful value returned
2143        t[0] = None
2144
2145    def p_decode_stmt_list_0(self, t):
2146        'decode_stmt_list : decode_stmt'
2147        t[0] = t[1]
2148
2149    def p_decode_stmt_list_1(self, t):
2150        'decode_stmt_list : decode_stmt decode_stmt_list'
2151        if (t[1].has_decode_default and t[2].has_decode_default):
2152            error(t.lineno(1), 'Two default cases in decode block')
2153        t[0] = t[1] + t[2]
2154
2155    #
2156    # Decode statement rules
2157    #
2158    # There are four types of statements allowed in a decode block:
2159    # 1. Format blocks 'format <foo> { ... }'
2160    # 2. Nested decode blocks
2161    # 3. Instruction definitions.
2162    # 4. C preprocessor directives.
2163
2164
2165    # Preprocessor directives found in a decode statement list are
2166    # passed through to the output, replicated to all of the output
2167    # code streams.  This works well for ifdefs, so we can ifdef out
2168    # both the declarations and the decode cases generated by an
2169    # instruction definition.  Handling them as part of the grammar
2170    # makes it easy to keep them in the right place with respect to
2171    # the code generated by the other statements.
2172    def p_decode_stmt_cpp(self, t):
2173        'decode_stmt : CPPDIRECTIVE'
2174        t[0] = GenCode(self, t[1], t[1], t[1], t[1])
2175
2176    # A format block 'format <foo> { ... }' sets the default
2177    # instruction format used to handle instruction definitions inside
2178    # the block.  This format can be overridden by using an explicit
2179    # format on the instruction definition or with a nested format
2180    # block.
2181    def p_decode_stmt_format(self, t):
2182        'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
2183        # The format will be pushed on the stack when 'push_format_id'
2184        # is processed (see below).  Once the parser has recognized
2185        # the full production (through the right brace), we're done
2186        # with the format, so now we can pop it.
2187        self.formatStack.pop()
2188        t[0] = t[4]
2189
2190    # This rule exists so we can set the current format (& push the
2191    # stack) when we recognize the format name part of the format
2192    # block.
2193    def p_push_format_id(self, t):
2194        'push_format_id : ID'
2195        try:
2196            self.formatStack.push(self.formatMap[t[1]])
2197            t[0] = ('', '// format %s' % t[1])
2198        except KeyError:
2199            error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
2200
2201    # Nested decode block: if the value of the current field matches
2202    # the specified constant(s), do a nested decode on some other field.
2203    def p_decode_stmt_decode(self, t):
2204        'decode_stmt : case_list COLON decode_block'
2205        case_list = t[1]
2206        codeObj = t[3]
2207        # just wrap the decoding code from the block as a case in the
2208        # outer switch statement.
2209        codeObj.wrap_decode_block('\n%s\n' % ''.join(case_list))
2210        codeObj.has_decode_default = (case_list == ['default:'])
2211        t[0] = codeObj
2212
2213    # Instruction definition (finally!).
2214    def p_decode_stmt_inst(self, t):
2215        'decode_stmt : case_list COLON inst SEMI'
2216        case_list = t[1]
2217        codeObj = t[3]
2218        codeObj.wrap_decode_block('\n%s' % ''.join(case_list), 'break;\n')
2219        codeObj.has_decode_default = (case_list == ['default:'])
2220        t[0] = codeObj
2221
2222    # The constant list for a decode case label must be non-empty, and must
2223    # either be the keyword 'default', or made up of one or more
2224    # comma-separated integer literals or strings which evaluate to
2225    # constants when compiled as C++.
2226    def p_case_list_0(self, t):
2227        'case_list : DEFAULT'
2228        t[0] = ['default:']
2229
2230    def prep_int_lit_case_label(self, lit):
2231        if lit >= 2**32:
2232            return 'case ULL(%#x): ' % lit
2233        else:
2234            return 'case %#x: ' % lit
2235
2236    def prep_str_lit_case_label(self, lit):
2237        return 'case %s: ' % lit
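    # For example, prep_int_lit_case_label(0x1f) yields 'case 0x1f: ',
    # while a literal of 2**32 or more is wrapped in the ULL() macro.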
2238
2239    def p_case_list_1(self, t):
2240        'case_list : INTLIT'
2241        t[0] = [self.prep_int_lit_case_label(t[1])]
2242
2243    def p_case_list_2(self, t):
2244        'case_list : STRLIT'
2245        t[0] = [self.prep_str_lit_case_label(t[1])]
2246
2247    def p_case_list_3(self, t):
2248        'case_list : case_list COMMA INTLIT'
2249        t[0] = t[1]
2250        t[0].append(self.prep_int_lit_case_label(t[3]))
2251
2252    def p_case_list_4(self, t):
2253        'case_list : case_list COMMA STRLIT'
2254        t[0] = t[1]
2255        t[0].append(self.prep_str_lit_case_label(t[3]))
2256
2257    # Define an instruction using the current instruction format
2258    # (specified by an enclosing format block).
2259    # "<mnemonic>(<args>)"
2260    def p_inst_0(self, t):
2261        'inst : ID LPAREN arg_list RPAREN'
2262        # Pass the ID and arg list to the current format class to deal with.
2263        currentFormat = self.formatStack.top()
2264        codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno)
2265        args = ','.join(map(str, t[3]))
2266        args = re.sub('(?m)^', '//', args)
2267        args = re.sub('^//', '', args)
2268        comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
2269        codeObj.prepend_all(comment)
2270        t[0] = codeObj
2271
2272    # Define an instruction using an explicitly specified format:
2273    # "<fmt>::<mnemonic>(<args>)"
2274    def p_inst_1(self, t):
2275        'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
2276        try:
2277            format = self.formatMap[t[1]]
2278        except KeyError:
2279            error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
2280
2281        codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno)
2282        comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
2283        codeObj.prepend_all(comment)
2284        t[0] = codeObj
2285
2286    # The arg list generates a tuple, where the first element is a
2287    # list of the positional args and the second element is a dict
2288    # containing the keyword args.
2289    def p_arg_list_0(self, t):
2290        'arg_list : positional_arg_list COMMA keyword_arg_list'
2291        t[0] = ( t[1], t[3] )
2292
2293    def p_arg_list_1(self, t):
2294        'arg_list : positional_arg_list'
2295        t[0] = ( t[1], {} )
2296
2297    def p_arg_list_2(self, t):
2298        'arg_list : keyword_arg_list'
2299        t[0] = ( [], t[1] )
2300
2301    def p_positional_arg_list_0(self, t):
2302        'positional_arg_list : empty'
2303        t[0] = []
2304
2305    def p_positional_arg_list_1(self, t):
2306        'positional_arg_list : expr'
2307        t[0] = [t[1]]
2308
2309    def p_positional_arg_list_2(self, t):
2310        'positional_arg_list : positional_arg_list COMMA expr'
2311        t[0] = t[1] + [t[3]]
2312
2313    def p_keyword_arg_list_0(self, t):
2314        'keyword_arg_list : keyword_arg'
2315        t[0] = t[1]
2316
2317    def p_keyword_arg_list_1(self, t):
2318        'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
2319        t[0] = t[1]
2320        t[0].update(t[3])
2321
2322    def p_keyword_arg(self, t):
2323        'keyword_arg : ID EQUALS expr'
2324        t[0] = { t[1] : t[3] }
2325
2326    #
2327    # Basic expressions.  These constitute the argument values of
2328    # "function calls" (i.e. instruction definitions in the decode
2329    # block) and default values for formal parameters of format
2330    # functions.
2331    #
2332    # Right now, these are either strings, integers, or (recursively)
2333    # lists of exprs (using Python square-bracket list syntax).  Note
2334    # that bare identifiers are treated as string constants here (since
2335    # there isn't really a variable namespace to refer to).
2336    #
2337    def p_expr_0(self, t):
2338        '''expr : ID
2339                | INTLIT
2340                | STRLIT
2341                | CODELIT'''
2342        t[0] = t[1]
2343
2344    def p_expr_1(self, t):
2345        '''expr : LBRACKET list_expr RBRACKET'''
2346        t[0] = t[2]
2347
2348    def p_list_expr_0(self, t):
2349        'list_expr : expr'
2350        t[0] = [t[1]]
2351
2352    def p_list_expr_1(self, t):
2353        'list_expr : list_expr COMMA expr'
2354        t[0] = t[1] + [t[3]]
2355
2356    def p_list_expr_2(self, t):
2357        'list_expr : empty'
2358        t[0] = []
2359
2360    #
2361    # Empty production... use in other rules for readability.
2362    #
2363    def p_empty(self, t):
2364        'empty :'
2365        pass
2366
2367    # Parse error handler.  Note that the argument here is the
2368    # offending *token*, not a grammar symbol (hence the need to use
2369    # t.value)
2370    def p_error(self, t):
2371        if t:
2372            error(t.lexer.lineno, "syntax error at '%s'" % t.value)
2373        else:
2374            error("unknown syntax error")
2375
2376    # END OF GRAMMAR RULES
2377
2378    def updateExportContext(self):
2379
2380        # create a continuation that allows us to grab the current parser
2381        def wrapInstObjParams(*args):
2382            return InstObjParams(self, *args)
2383        self.exportContext['InstObjParams'] = wrapInstObjParams
2384        self.exportContext.update(self.templateMap)
2385
2386    def defFormat(self, id, params, code, lineno):
2387        '''Define a new format'''
2388
2389        # make sure we haven't already defined this one
2390        if id in self.formatMap:
2391            error(lineno, 'format %s redefined.' % id)
2392
2393        # create new object and store in global map
2394        self.formatMap[id] = Format(id, params, code)
2395
2396    def protectNonSubstPercents(self, s):
2397        '''Protect any non-dict-substitution '%'s in a format string
2398        (i.e. those not followed by '(')'''
2399
2400        return re.sub(r'%(?!\()', '%%', s)
2401
2402    def buildOperandNameMap(self, user_dict, lineno):
2403        operand_name = {}
2404        for op_name, val in user_dict.iteritems():
2405
2406            # Check if extra attributes have been specified.
2407            if len(val) > 9:
2408                error(lineno, 'error: too many attributes for operand "%s"' %
2409                      op_name)
2410
2411            # Pad val with None in case optional args are missing
2412            val += (None, None, None, None)
2413            base_cls_name, dflt_ext, reg_spec, flags, sort_pri, \
2414            read_code, write_code, read_predicate, write_predicate = val[:9]
2415
2416            # Canonical flag structure is a triple of lists, where each list
2417            # indicates the set of flags implied by this operand always, when
2418            # used as a source, and when used as a dest, respectively.
2419            # For simplicity this can be initialized using a variety of fairly
2420            # obvious shortcuts; we convert these to canonical form here.
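            # For illustration (flag names are examples; makeList is assumed
            # to wrap a bare string in a list): 'IsInteger', ['IsInteger'],
            # and (['IsInteger'], [], []) all end up as the canonical triple
            # (['IsInteger'], [], []).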
2421            if not flags:
2422                # no flags specified (e.g., 'None')
2423                flags = ( [], [], [] )
2424            elif isinstance(flags, str):
2425                # a single flag: assumed to be unconditional
2426                flags = ( [ flags ], [], [] )
2427            elif isinstance(flags, list):
2428                # a list of flags: also assumed to be unconditional
2429                flags = ( flags, [], [] )
2430            elif isinstance(flags, tuple):
2431                # it's a tuple: it should be a triple,
2432                # but each item could be a single string or a list
2433                (uncond_flags, src_flags, dest_flags) = flags
2434                flags = (makeList(uncond_flags),
2435                         makeList(src_flags), makeList(dest_flags))
2436
2437            # Accumulate attributes of new operand class in tmp_dict
2438            tmp_dict = {}
2439            attrList = ['reg_spec', 'flags', 'sort_pri',
2440                        'read_code', 'write_code',
2441                        'read_predicate', 'write_predicate']
2442            if dflt_ext:
2443                dflt_ctype = self.operandTypeMap[dflt_ext]
2444                attrList.extend(['dflt_ctype', 'dflt_ext'])
2445            # reg_spec is either just a string or a dictionary
2446            # (for elems of vector)
2447            if isinstance(reg_spec, tuple):
2448                (reg_spec, elem_spec) = reg_spec
2449                if isinstance(elem_spec, str):
2450                    attrList.append('elem_spec')
2451                else:
2452                    assert(isinstance(elem_spec, dict))
2453                    elems = elem_spec
2454                    attrList.append('elems')
2455            for attr in attrList:
2456                tmp_dict[attr] = eval(attr)
2457            tmp_dict['base_name'] = op_name
2458
2459            # New class name will be e.g. "IntReg_Ra"
2460            cls_name = base_cls_name + '_' + op_name
2461            # Evaluate string arg to get class object.  Note that the
2462            # actual base class for "IntReg" is "IntRegOperand", i.e. we
2463            # have to append "Operand".
2464            try:
2465                base_cls = eval(base_cls_name + 'Operand')
2466            except NameError:
2467                error(lineno,
2468                      'error: unknown operand base class "%s"' % base_cls_name)
2469            # The following statement creates a new class called
2470            # <cls_name> as a subclass of <base_cls> with the attributes
2471            # in tmp_dict, just as if we evaluated a class declaration.
2472            operand_name[op_name] = type(cls_name, (base_cls,), tmp_dict)
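            # For illustration (the names are examples): an 'Ra' entry whose
            # base class name is 'IntReg' produces a class equivalent to
            #   class IntReg_Ra(IntRegOperand):
            #       base_name = 'Ra'
            #       reg_spec = ...   # plus the other attributes in tmp_dict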
2473
2474        self.operandNameMap = operand_name
2475
2476        # Define operand variables.
2477        operands = user_dict.keys()
2478        # Add the elems defined in the vector operands and
2479        # build a map elem -> vector (used in OperandList)
2480        elem_to_vec = {}
2481        for op in user_dict.keys():
2482            if hasattr(self.operandNameMap[op], 'elems'):
2483                for elem in self.operandNameMap[op].elems.keys():
2484                    operands.append(elem)
2485                    elem_to_vec[elem] = op
2486        self.elemToVector = elem_to_vec
2487        extensions = self.operandTypeMap.keys()
2488
2489        operandsREString = r'''
2490        (?<!\w)      # neg. lookbehind assertion: prevent partial matches
2491        ((%s)(?:_(%s))?)   # match: operand with optional '_' then suffix
2492        (?!\w)       # neg. lookahead assertion: prevent partial matches
2493        ''' % (string.join(operands, '|'), string.join(extensions, '|'))
2494
2495        self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
2496
2497        # Same as operandsREString, but extension is mandatory, and only two
2498        # groups are returned (base and ext, not full name as above).
2499        # Used for substituting '_' for '.' to make C++ identifiers.
2500        operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
2501            % (string.join(operands, '|'), string.join(extensions, '|'))
2502
2503        self.operandsWithExtRE = \
2504            re.compile(operandsWithExtREString, re.MULTILINE)
2505
2506    def substMungedOpNames(self, code):
2507        '''Munge operand names in code string to make legal C++
2508        variable names.  This means getting rid of the type extension
2509        if any.  Will match the base_name attribute of the Operand object.'''
2510        return self.operandsWithExtRE.sub(r'\1', code)
2511
2512    def mungeSnippet(self, s):
2513        '''Fix up code snippets for final substitution in templates.'''
2514        if isinstance(s, str):
2515            return self.substMungedOpNames(substBitOps(s))
2516        else:
2517            return s
2518
2519    def open(self, name, bare=False):
2520        '''Open the output file for writing and include scary warning.'''
2521        filename = os.path.join(self.output_dir, name)
2522        f = open(filename, 'w')
2523        if f:
2524            if not bare:
2525                f.write(ISAParser.scaremonger_template % self)
2526        return f
2527
2528    def update(self, file, contents):
2529        '''Update the output file only.  Scons should handle the case when
2530        the new contents are unchanged using its built-in hash feature.'''
2531        f = self.open(file)
2532        f.write(contents)
2533        f.close()
2534
2535    # This regular expression matches '##include' directives
2536    includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[^"]*)".*$',
2537                           re.MULTILINE)
2538
2539    def replace_include(self, matchobj, dirname):
2540        """Function to replace a matched '##include' directive with the
2541        contents of the specified file (with nested ##includes
2542        replaced recursively).  'matchobj' is an re match object
2543        (from a match of includeRE) and 'dirname' is the directory
2544        relative to which the file path should be resolved."""
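        # For example (the filename is hypothetical), the directive
        #   ##include "decoder.isa"
        # expands to
        #   ##newfile "<dir>/decoder.isa"
        #   ...flattened contents of decoder.isa...
        #   ##endfile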
2545
2546        fname = matchobj.group('filename')
2547        full_fname = os.path.normpath(os.path.join(dirname, fname))
2548        contents = '##newfile "%s"\n%s\n##endfile\n' % \
2549                   (full_fname, self.read_and_flatten(full_fname))
2550        return contents
2551
2552    def read_and_flatten(self, filename):
2553        """Read a file and recursively flatten nested '##include' files."""
2554
2555        current_dir = os.path.dirname(filename)
2556        try:
2557            contents = open(filename).read()
2558        except IOError:
2559            error('Error including file "%s"' % filename)
2560
2561        self.fileNameStack.push(LineTracker(filename))
2562
2563        # Find any includes and include them
2564        def replace(matchobj):
2565            return self.replace_include(matchobj, current_dir)
2566        contents = self.includeRE.sub(replace, contents)
2567
2568        self.fileNameStack.pop()
2569        return contents
2570
2571    AlreadyGenerated = {}
2572
2573    def _parse_isa_desc(self, isa_desc_file):
2574        '''Read in and parse the ISA description.'''
2575
2576        # The build system can end up running the ISA parser twice: once to
2577        # finalize the build dependencies, and then to actually generate
2578        # the files it expects (in src/arch/$ARCH/generated). This code
2579        # doesn't do anything different either time, however; the SCons
2580        # invocations just expect different things. Since this code runs
2581        # within SCons, we can just remember that we've already run and
2582        # not perform a completely unnecessary run, since the ISA parser's
2583        # effect is idempotent.
2584        if isa_desc_file in ISAParser.AlreadyGenerated:
2585            return
2586
2587        # grab the last three path components of isa_desc_file
2588        self.filename = '/'.join(isa_desc_file.split('/')[-3:])
2589
2590        # Read file and (recursively) all included files into a string.
2591        # PLY requires that the input be in a single string so we have to
2592        # do this up front.
2593        isa_desc = self.read_and_flatten(isa_desc_file)
2594
2595        # Initialize lineno tracker
2596        self.lex.lineno = LineTracker(isa_desc_file)
2597
2598        # Parse.
2599        self.parse_string(isa_desc)
2600
2601        ISAParser.AlreadyGenerated[isa_desc_file] = None
2602
2603    def parse_isa_desc(self, *args, **kwargs):
2604        try:
2605            self._parse_isa_desc(*args, **kwargs)
2606        except ISAParserError, e:
2607            print backtrace(self.fileNameStack)
2608            print "At %s:" % e.lineno
2609            print e
2610            sys.exit(1)
2611
2612# Called as script: get args from command line.
2613# Args are: <isa desc file> <output dir>
2614if __name__ == '__main__':
2615    ISAParser(sys.argv[2]).parse_isa_desc(sys.argv[1])
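# For example (the paths are illustrative):
#   python isa_parser.py path/to/main.isa build/generated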
2616