base.cc revision 2090
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "mem/base_mem.hh"
#include "mem/mem_interface.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "targetarch/alpha_memory.hh"
#include "targetarch/stacktrace.hh"
#include "targetarch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/functional/functional.hh"
#endif // FULL_SYSTEM

using namespace std;
// The SimpleCPU does Alpha only
using namespace LittleEndianGuest;


SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}

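// Run the CPU for one tick event: execute up to 'width' instructions,
// stopping early if the CPU leaves the Running state.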
void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu)
    : Event(&mainEventQueue), cpu(_cpu)
{
}

void SimpleCPU::CacheCompletionEvent::process()
{
    cpu->processCacheCompletion();
}

const char *
SimpleCPU::CacheCompletionEvent::description()
{
    return "SimpleCPU cache completion event";
}

SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), tickEvent(this, p->width), xc(NULL),
      cacheCompletionEvent(this)
{
    _status = Idle;
#if FULL_SYSTEM
    xc = new ExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);

    // initialize CPU, including PC
    TheISA::initCPU(&xc->regs);
#else
    xc = new ExecContext(this, /* thread_num */ 0, p->process, /* asid */ 0);
#endif // !FULL_SYSTEM

    icacheInterface = p->icache_interface;
    dcacheInterface = p->dcache_interface;

    memReq = new MemReq();
    memReq->xc = xc;
    memReq->asid = 0;
    memReq->data = new uint8_t[64];

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xc);
}

SimpleCPU::~SimpleCPU()
{
}

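// Switch this CPU out for a sampling switchover.  If a dcache miss is
// outstanding, defer the switch until the miss completes; otherwise squash
// any pending tick event and notify the sampler immediately.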
void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheMissStall) {
        DPRINTF(Sampler, "Outstanding dcache access, waiting for completion\n");
        _status = DcacheMissSwitch;
    }
    else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.squash();

        sampler->signalSwitched();
    }
}


void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(xc);

    assert(_status == Idle);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(xc);

    assert(_status == Running);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

void
SimpleCPU::resetStats()
{
    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc", name()));
    xc->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
    cacheCompletionEvent.serialize(os);
}

void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    xc->unserialize(cp, csprintf("%s.xc", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
    cacheCompletionEvent
        .unserialize(cp, csprintf("%s.cacheCompletionEvent", section));
}

void
change_thread_state(int thread_number, int activate, int priority)
{
}

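// Block-copy support: copySrcTranslate() translates and records the copy
// source address; a subsequent copy() call translates the destination and
// performs the actual 64-byte transfer.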
Fault *
SimpleCPU::copySrcTranslate(Addr src)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only block sizes of 64 are supported at the moment.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (src & TheISA::PageMask) != ((src + blk_size) & TheISA::PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault *fault = xc->translateDataReadReq(memReq);

    assert(fault != AlignmentFault);

    if (fault == NoFault) {
        xc->copySrcAddr = src;
        xc->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        xc->copySrcAddr = 0;
        xc->copySrcPhysAddr = 0;
    }
    return fault;
}

Fault *
SimpleCPU::copy(Addr dest)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only block sizes of 64 are supported at the moment.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(xc->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (dest & TheISA::PageMask) != ((dest + blk_size) & TheISA::PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x.", dest);
    }

    memReq->reset(dest & ~(blk_size - 1), blk_size);
    // translate to physical address
    Fault *fault = xc->translateDataWriteReq(memReq);

    assert(fault != AlignmentFault);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = xc->copySrcPhysAddr;
        xc->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        xc->mem->write(memReq, data);
        if (dcacheInterface) {
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = xc->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    }
    return fault;
}

// precise architected memory state accessor macros
template <class T>
Fault *
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
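    // If this access is being re-executed after a dcache miss completed (or
    // while the CPU is switching out), memReq still holds the translated
    // request from the first attempt; just do the functional access.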
    if (status() == DcacheMissStall || status() == DcacheMissSwitch) {
        Fault *fault = xc->read(memReq, data);

        if (traceData) {
            traceData->setAddr(addr);
        }
        return fault;
    }

    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault *fault = xc->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = xc->read(memReq, data);
        }
    } else if (fault == NoFault) {
        // do functional access
        fault = xc->read(memReq, data);
    }

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault *
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault *
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault *
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault *
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault *
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault *
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault *
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault *
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault *fault = xc->translateDataWriteReq(memReq);

    // do functional access
    if (fault == NoFault)
        fault = xc->write(memReq, data);

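    // If a data cache is present, send the store to it as well; a miss
    // stalls the CPU until the cache completion event fires.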
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data, (uint8_t *)&data, memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault *
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault *
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault *
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault *
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault *
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault *
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault *
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xc, addr);
}
#endif // FULL_SYSTEM

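// Called when an outstanding cache access completes.  Charges the stall
// cycles to the appropriate counter and resumes execution (or finishes a
// deferred switch-out) depending on the state the CPU was stalled in.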
void
SimpleCPU::processCacheCompletion()
{
    switch (status()) {
      case IcacheMissStall:
        icacheStallCycles += curTick - lastIcacheStall;
        _status = IcacheMissComplete;
        scheduleTickEvent(1);
        break;
      case DcacheMissStall:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheMissSwitch:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        _status = SwitchedOut;
        sampler->signalSwitched();
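        // fall through: once switched out, further completions are ignored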
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        return;
      default:
        panic("SimpleCPU::processCacheCompletion: bad state");
        break;
    }
}

#if FULL_SYSTEM
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (xc->status() == ExecContext::Suspended) {
        DPRINTF(IPI, "Suspended Processor awoke\n");
        xc->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault *fault = NoFault;

#if FULL_SYSTEM
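    // Check for pending interrupts: gather software (SIRR) and external
    // interrupt sources into an IPL/summary pair and trap if the resulting
    // IPL exceeds the current IPLR (see table 4-19 of the 21164 reference).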
    if (checkInterrupts && check_interrupts() && !xc->inPalMode() &&
        status() != IcacheMissComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;
        IntReg *ipr = xc->regs.ipr;

        if (xc->regs.ipr[TheISA::IPR_SIRR]) {
            for (int i = TheISA::INTLEVEL_SOFTWARE_MIN;
                 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) {
                if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = xc->cpu->intr_status();
        for (int i = TheISA::INTLEVEL_EXTERNAL_MIN;
            i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (ipr[TheISA::IPR_ASTRR])
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) {
            ipr[TheISA::IPR_ISR] = summary;
            ipr[TheISA::IPR_INTID] = ipl;
            xc->ev5_trap(InterruptFault);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    ipr[TheISA::IPR_IPLR], ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    xc->regs.intRegFile[ZeroReg] = 0;
#ifdef TARGET_ALPHA
    xc->regs.floatRegFile.d[ZeroReg] = 0.0;
#endif // TARGET_ALPHA

    if (status() == IcacheMissComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    }
    else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc)	(((pc) & 1) ? PHYSICAL : 0)
#else
#define IFETCH_FLAGS(pc)	0
#endif
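        // IFETCH_FLAGS: in full-system mode a PC with bit 0 set indicates
        // PAL-mode code, which is fetched using its physical address.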

        memReq->cmd = Read;
        memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t),
                     IFETCH_FLAGS(xc->regs.pc));

        fault = xc->translateInstReq(memReq);

        if (fault == NoFault)
            fault = xc->mem->read(memReq, inst);

        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss.  We really should add first-class support for this
            // at some point.
            if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst<TheISA>::decode(inst);

        traceData = Trace::getInstRecord(curTick, xc, this, curStaticInst,
                                         xc->regs.pc);

#if FULL_SYSTEM
        xc->setInst(inst);
#endif // FULL_SYSTEM

        xc->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (xc->fnbin) {
            assert(xc->kernelStats);
            system->kernelBinning->execute(xc, inst);
        }

        if (xc->profile) {
            bool usermode = (xc->regs.ipr[AlphaISA::IPR_DTB_CM] & 0x18) != 0;
            xc->profilePC = usermode ? 1 : xc->regs.pc;
            ProfileNode *node = xc->profile->consume(xc, inst);
            if (node)
                xc->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later.
        if (traceData &&
                !(status() == DcacheMissStall && memReq->cmd.isRead())) {
            traceData->finalize();
        }

        traceFunctions(xc->regs.pc);

    }	// if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        xc->ev5_trap(fault);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc);
#endif // FULL_SYSTEM
    }
    else {
        // go to the next instruction
        xc->regs.pc = xc->regs.npc;
        xc->regs.npc += sizeof(MachInst);
    }

#if FULL_SYSTEM
    Addr oldpc;
    do {
        oldpc = xc->regs.pc;
        system->pcEventQueue.service(xc);
    } while (oldpc != xc->regs.pc);
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheMissStall);

    if (status() == Running && !tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));
}

////////////////////////////////////////////////////////////////////////
//
//  SimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<FunctionalMemory *> mem;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    SimObjectParam<BaseMem *> icache;
    SimObjectParam<BaseMem *> dcache;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(icache, "L1 instruction cache object"),
    INIT_PARAM(dcache, "L1 data cache object"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->icache_interface = (icache) ? icache->getInterface() : NULL;
    params->dcache_interface = (dcache) ? dcache->getInterface() : NULL;
    params->width = width;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->mem = mem;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)