// base.cc revision 2190
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "mem/base_mem.hh"
#include "mem/mem_interface.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "targetarch/alpha_memory.hh"
#include "targetarch/stacktrace.hh"
#include "targetarch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/functional/functional.hh"
#endif // FULL_SYSTEM

using namespace std;
// The SimpleCPU does Alpha only
using namespace AlphaISA;


SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}


void
SimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

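// process() is the CPU's driver: each time the tick event fires it runs
// up to 'width' instructions back to back, stopping early if the CPU
// leaves the Running state (e.g., because it stalled on a cache miss).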
void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu)
    : Event(&mainEventQueue), cpu(_cpu)
{
}

void
SimpleCPU::CacheCompletionEvent::process()
{
    cpu->processCacheCompletion();
}

const char *
SimpleCPU::CacheCompletionEvent::description()
{
    return "SimpleCPU cache completion event";
}

SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), tickEvent(this, p->width), cpuXC(NULL),
      cacheCompletionEvent(this)
{
    _status = Idle;
#if FULL_SYSTEM
    cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);

#else
    cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process,
                               /* asid */ 0);
#endif // !FULL_SYSTEM
    xcProxy = cpuXC->getProxy();

    icacheInterface = p->icache_interface;
    dcacheInterface = p->dcache_interface;

    memReq = new MemReq();
    memReq->xc = xcProxy;
    memReq->asid = 0;
    memReq->data = new uint8_t[64];

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xcProxy);
}

SimpleCPU::~SimpleCPU()
{
}

void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheMissStall) {
        DPRINTF(Sampler, "Outstanding dcache access, waiting for completion\n");
        _status = DcacheMissSwitch;
    }
    else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.squash();

        sampler->signalSwitched();
    }
}


void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

void
SimpleCPU::resetStats()
{
    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc", name()));
    cpuXC->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
    cacheCompletionEvent.serialize(os);
}

void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    cpuXC->unserialize(cp, csprintf("%s.xc", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
    cacheCompletionEvent
        .unserialize(cp, csprintf("%s.cacheCompletionEvent", section));
}

void
change_thread_state(int thread_number, int activate, int priority)
{
}

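// Block-copy support: copySrcTranslate() translates the block-aligned
// source address and latches the resulting physical address in the
// execution context; copy() then translates the destination, performs the
// copy through functional memory and, if a data cache is present, issues
// a Copy request to it.  Only 64-byte blocks are supported here.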
Fault
SimpleCPU::copySrcTranslate(Addr src)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (src & PageMask) != ((src + blk_size) & PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    assert(fault != AlignmentFault);

    if (fault == NoFault) {
        cpuXC->copySrcAddr = src;
        cpuXC->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        cpuXC->copySrcAddr = 0;
        cpuXC->copySrcPhysAddr = 0;
    }
    return fault;
}

Fault
SimpleCPU::copy(Addr dest)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(cpuXC->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (dest & PageMask) != ((dest + blk_size) & PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x.", dest);
    }

    memReq->reset(dest & ~(blk_size - 1), blk_size);
    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    assert(fault != AlignmentFault);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = cpuXC->copySrcPhysAddr;
        cpuXC->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        cpuXC->mem->write(memReq, data);
        if (dcacheInterface) {
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = cpuXC->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    }
    return fault;
}

// precise architected memory state accessors (templated read/write)
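// read<T>() translates the virtual address and, if a data cache interface
// is present, sends the request to it.  On a miss the tick event is
// descheduled and the CPU stalls (DcacheMissStall) until the cache's
// completion event fires; on a hit (or with no cache) the load is
// satisfied functionally right away.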
template <class T>
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    if (status() == DcacheMissStall || status() == DcacheMissSwitch) {
        Fault fault = cpuXC->read(memReq, data);

        if (traceData) {
            traceData->setAddr(addr);
        }
        return fault;
    }

    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = cpuXC->read(memReq, data);
        }
    } else if (fault == NoFault) {
        // do functional access
        fault = cpuXC->read(memReq, data);
    }

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

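// write<T>() mirrors read<T>(): translate, perform the functional write,
// then (if a data cache is present) send the request to the cache and
// stall on a miss.  If 'res' is non-NULL the request's result field is
// returned to the caller (e.g., the success flag of a store-conditional).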
template <class T>
Fault
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    // do functional access
    if (fault == NoFault)
        fault = cpuXC->write(memReq, data);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data, (uint8_t *)&data, memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xcProxy, addr);
}
#endif // FULL_SYSTEM

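// Called by the cache completion event when an outstanding cache access
// finishes.  Depending on which state the CPU stalled in, this charges the
// stall cycles to the appropriate counter, re-executes the stalled
// instruction for pending loads so its result can be written back, and
// either resumes ticking or completes a pending switch-out.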
void
SimpleCPU::processCacheCompletion()
{
    switch (status()) {
      case IcacheMissStall:
        icacheStallCycles += curTick - lastIcacheStall;
        _status = IcacheMissComplete;
        scheduleTickEvent(1);
        break;
      case DcacheMissStall:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheMissSwitch:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        _status = SwitchedOut;
        sampler->signalSwitched();
        // fall through
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        return;
      default:
        panic("SimpleCPU::processCacheCompletion: bad state");
        break;
    }
}

#if FULL_SYSTEM
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (cpuXC->status() == ExecContext::Suspended) {
        DPRINTF(IPI, "Suspended Processor awoke\n");
        cpuXC->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault fault = NoFault;

#if FULL_SYSTEM
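    // Check for pending interrupts: gather the software (SIRR) and external
    // interrupt sources into a summary mask and an interrupt priority level
    // (IPL), and if that level exceeds the current IPLR, post an interrupt
    // trap before fetching the next instruction.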
    if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() &&
        status() != IcacheMissComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;

        if (cpuXC->readMiscReg(IPR_SIRR)) {
            for (int i = INTLEVEL_SOFTWARE_MIN;
                 i < INTLEVEL_SOFTWARE_MAX; i++) {
                if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = cpuXC->cpu->intr_status();
        for (int i = INTLEVEL_EXTERNAL_MIN;
            i < INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (cpuXC->readMiscReg(IPR_ASTRR))
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
            cpuXC->setMiscReg(IPR_ISR, summary);
            cpuXC->setMiscReg(IPR_INTID, ipl);
            cpuXC->ev5_trap(InterruptFault);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    cpuXC->setIntReg(ZeroReg, 0);
#ifdef TARGET_ALPHA
    cpuXC->setFloatRegDouble(ZeroReg, 0.0);
#endif // TARGET_ALPHA

    if (status() == IcacheMissComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    }
    else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc)	(((pc) & 1) ? PHYSICAL : 0)
#else
#define IFETCH_FLAGS(pc)	0
#endif
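        // In full-system mode a PC with its low bit set denotes PAL-mode
        // code, which is fetched with a physical (untranslated) address;
        // that is what the PHYSICAL flag above requests.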

        memReq->cmd = Read;
        memReq->reset(cpuXC->readPC() & ~3, sizeof(uint32_t),
                     IFETCH_FLAGS(cpuXC->readPC()));

        fault = cpuXC->translateInstReq(memReq);

        if (fault == NoFault)
            fault = cpuXC->mem->read(memReq, inst);

        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss.  We really should add first-class support for this
            // at some point.
            if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst::decode(inst);

        traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
                                         cpuXC->readPC());

#if FULL_SYSTEM
        cpuXC->setInst(inst);
#endif // FULL_SYSTEM

        cpuXC->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (system->kernelBinning->fnbin) {
            assert(kernelStats);
            system->kernelBinning->execute(xcProxy, inst);
        }

        if (cpuXC->profile) {
            bool usermode =
                (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
            cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
            ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
            if (node)
                cpuXC->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later.
        if (traceData &&
                !(status() == DcacheMissStall && memReq->cmd.isRead())) {
            traceData->finalize();
        }

        traceFunctions(cpuXC->readPC());

    } // if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        cpuXC->ev5_trap(fault);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC 0x%08p", fault, cpuXC->readPC());
#endif // FULL_SYSTEM
    }
    else {
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
    }

#if FULL_SYSTEM
    Addr oldpc;
    do {
        oldpc = cpuXC->readPC();
        system->pcEventQueue.service(xcProxy);
    } while (oldpc != cpuXC->readPC());
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheMissStall);

    if (status() == Running && !tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));
}

////////////////////////////////////////////////////////////////////////
//
//  SimpleCPU Simulation Object
//
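//  The parameter declarations below are expanded by the builder macros
//  (sim/builder.hh) into a parameter context; CREATE_SIM_OBJECT() then
//  packages those parameters into a SimpleCPU::Params struct and
//  constructs the CPU from the configuration.
//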
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<FunctionalMemory *> mem;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    SimObjectParam<BaseMem *> icache;
    SimObjectParam<BaseMem *> dcache;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(icache, "L1 instruction cache object"),
    INIT_PARAM(dcache, "L1 data cache object"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->icache_interface = (icache) ? icache->getInterface() : NULL;
    params->dcache_interface = (dcache) ? dcache->getInterface() : NULL;
    params->width = width;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->mem = mem;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)
