base.cc revision 2234
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "mem/base_mem.hh"
#include "mem/mem_interface.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "arch/tlb.hh"
#include "arch/stacktrace.hh"
#include "arch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/functional/functional.hh"
#endif // FULL_SYSTEM

using namespace std;
//The SimpleCPU does alpha only
using namespace AlphaISA;

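// Tick event: fires once per CPU cycle and calls tick().  The 'width'
// parameter is the number of instructions the CPU will attempt to
// execute per cycle (see TickEvent::process() below).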
SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}


void
SimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu)
    : Event(&mainEventQueue), cpu(_cpu)
{
}

void SimpleCPU::CacheCompletionEvent::process()
{
    cpu->processCacheCompletion();
}

const char *
SimpleCPU::CacheCompletionEvent::description()
{
    return "SimpleCPU cache completion event";
}

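// Construct the CPU and its single execution context.  A single MemReq
// object (with a 64-byte data buffer) is allocated up front and reused
// for instruction fetches, loads, stores, and block copies.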
SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), tickEvent(this, p->width), cpuXC(NULL),
      cacheCompletionEvent(this)
{
    _status = Idle;
#if FULL_SYSTEM
    cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);

#else
    cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process,
                               /* asid */ 0);
#endif // !FULL_SYSTEM
    xcProxy = cpuXC->getProxy();

    icacheInterface = p->icache_interface;
    dcacheInterface = p->dcache_interface;

    memReq = new MemReq();
    memReq->xc = xcProxy;
    memReq->asid = 0;
    memReq->data = new uint8_t[64];

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xcProxy);
}

SimpleCPU::~SimpleCPU()
{
}

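// Called by the Sampler when this CPU is being switched out.  If a
// D-cache miss is outstanding we defer the switch until the miss
// completes; otherwise squash the tick event and signal the sampler
// immediately.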
void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheMissStall) {
        DPRINTF(Sampler,"Outstanding dcache access, waiting for completion\n");
        _status = DcacheMissSwitch;
    }
    else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.squash();

        sampler->signalSwitched();
    }
}

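// Called when this CPU takes over execution from another CPU model
// (e.g., at the end of a sampling interval).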
void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

void
SimpleCPU::resetStats()
{
    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

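// Checkpointing: write out the CPU status, the in-flight instruction,
// the execution context, and both events so simulation can be resumed
// from exactly this point.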
void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc", name()));
    cpuXC->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
    cacheCompletionEvent.serialize(os);
}

void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    cpuXC->unserialize(cp, csprintf("%s.xc", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
    cacheCompletionEvent
        .unserialize(cp, csprintf("%s.cacheCompletionEvent", section));
}

void
change_thread_state(int thread_number, int activate, int priority)
{
}

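// copySrcTranslate()/copy() implement block-copy support: the source
// address is translated and latched here, then copy() translates the
// destination and moves one 64-byte block of data.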
Fault
SimpleCPU::copySrcTranslate(Addr src)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (src & PageMask) != ((src + blk_size) & PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    if (fault == NoFault) {
        cpuXC->copySrcAddr = src;
        cpuXC->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        assert(!fault->isAlignmentFault());

        cpuXC->copySrcAddr = 0;
        cpuXC->copySrcPhysAddr = 0;
    }
    return fault;
}

Fault
SimpleCPU::copy(Addr dest)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(cpuXC->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (dest & PageMask) != ((dest + blk_size) & PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x. ", dest);
    }

    memReq->reset(dest & ~(blk_size -1), blk_size);
    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = cpuXC->copySrcPhysAddr;
        cpuXC->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        cpuXC->mem->write(memReq, data);
        if (dcacheInterface) {
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = cpuXC->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    }
    else
        assert(!fault->isAlignmentFault());

    return fault;
}

// precise architected memory state accessor macros
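// read(): translate the virtual address, then access the D-cache if one
// is present.  On a hit (or when there is no cache) the access is
// performed functionally; a miss suspends the CPU in DcacheMissStall
// until the cacheCompletionEvent fires.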
template <class T>
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    if (status() == DcacheMissStall || status() == DcacheMissSwitch) {
        Fault fault = cpuXC->read(memReq,data);

        if (traceData) {
            traceData->setAddr(addr);
        }
        return fault;
    }

    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = cpuXC->read(memReq, data);

        }
    } else if(fault == NoFault) {
        // do functional access
        fault = cpuXC->read(memReq, data);

    }

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

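// Specializations: floating-point and signed reads are forwarded to the
// unsigned integer read of the same width.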
template<>
Fault
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


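// write(): translate the virtual address, perform the functional write,
// then (if a D-cache is present) issue the request to the cache; a miss
// stalls the CPU just as in read().  'res', if non-NULL, receives the
// request's result field.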
template <class T>
Fault
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    // do functional access
    if (fault == NoFault)
        fault = cpuXC->write(memReq, data);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data,(uint8_t *)&data,memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xcProxy, addr);
}
#endif // FULL_SYSTEM

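// Called when an outstanding I- or D-cache miss completes.  For data-read
// misses the stalled memory instruction is re-executed now that the data
// has arrived, then the tick event is rescheduled.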
void
SimpleCPU::processCacheCompletion()
{
    switch (status()) {
      case IcacheMissStall:
        icacheStallCycles += curTick - lastIcacheStall;
        _status = IcacheMissComplete;
        scheduleTickEvent(1);
        break;
      case DcacheMissStall:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        }
        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheMissSwitch:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        }
        _status = SwitchedOut;
        sampler->signalSwitched();
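        // Fall through to the SwitchedOut case: nothing more to do once
        // the switch has been signaled.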
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        return;
      default:
        panic("SimpleCPU::processCacheCompletion: bad state");
        break;
    }
}

#if FULL_SYSTEM
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (cpuXC->status() == ExecContext::Suspended) {
        DPRINTF(IPI,"Suspended Processor awoke\n");
        cpuXC->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
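// tick(): execute one CPU cycle -- check for interrupts (full-system
// only), fetch an instruction (possibly stalling on an I-cache miss),
// decode and execute it, update the PC, and reschedule the tick event
// while the CPU remains Running.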
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault fault = NoFault;

#if FULL_SYSTEM
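    // Check for pending interrupts: gather the software (SIRR) and
    // external interrupt summary bits and, if the highest pending level
    // exceeds the current IPLR, invoke an InterruptFault (Alpha 21164
    // interrupt model).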
    if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() &&
        status() != IcacheMissComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;

        if (cpuXC->readMiscReg(IPR_SIRR)) {
            for (int i = INTLEVEL_SOFTWARE_MIN;
                 i < INTLEVEL_SOFTWARE_MAX; i++) {
                if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = cpuXC->cpu->intr_status();
        for (int i = INTLEVEL_EXTERNAL_MIN;
            i < INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (cpuXC->readMiscReg(IPR_ASTRR))
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
            cpuXC->setMiscReg(IPR_ISR, summary);
            cpuXC->setMiscReg(IPR_INTID, ipl);

            Fault(new InterruptFault)->invoke(xcProxy);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    cpuXC->setIntReg(ZeroReg, 0);
#ifdef TARGET_ALPHA
    cpuXC->setFloatRegDouble(ZeroReg, 0.0);
#endif // TARGET_ALPHA

    if (status() == IcacheMissComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    }
    else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc)	((pc) & 1) ? PHYSICAL : 0
#else
#define IFETCH_FLAGS(pc)	0
#endif

        memReq->cmd = Read;
        memReq->reset(cpuXC->readPC() & ~3, sizeof(uint32_t),
                     IFETCH_FLAGS(cpuXC->readPC()));

        fault = cpuXC->translateInstReq(memReq);

        if (fault == NoFault)
            fault = cpuXC->mem->read(memReq, inst);

        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss.  We really should add first-class support for this
            // at some point.
            if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC()));

        traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
                                         cpuXC->readPC());

#if FULL_SYSTEM
        cpuXC->setInst(inst);
#endif // FULL_SYSTEM

        cpuXC->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (system->kernelBinning->fnbin) {
            assert(kernelStats);
            system->kernelBinning->execute(xcProxy, inst);
        }

        if (cpuXC->profile) {
            bool usermode =
                (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
            cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
            ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
            if (node)
                cpuXC->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later
        if (traceData &&
                !(status() == DcacheMissStall && memReq->cmd.isRead())) {
            traceData->finalize();
        }

        traceFunctions(cpuXC->readPC());

    }	// if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        fault->invoke(xcProxy);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC 0x%08p", fault, cpuXC->readPC());
#endif // FULL_SYSTEM
    }
    else {
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
    }

#if FULL_SYSTEM
    Addr oldpc;
    do {
        oldpc = cpuXC->readPC();
        system->pcEventQueue.service(xcProxy);
    } while (oldpc != cpuXC->readPC());
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheMissStall);

    if (status() == Running && !tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));
}

////////////////////////////////////////////////////////////////////////
//
//  SimpleCPU Simulation Object
//
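//  The parameters declared below are exposed to the configuration
//  system; CREATE_SIM_OBJECT copies them into a SimpleCPU::Params
//  struct and constructs the CPU.
//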
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<FunctionalMemory *> mem;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    SimObjectParam<BaseMem *> icache;
    SimObjectParam<BaseMem *> dcache;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(icache, "L1 instruction cache object"),
    INIT_PARAM(dcache, "L1 data cache object"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->icache_interface = (icache) ? icache->getInterface() : NULL;
    params->dcache_interface = (dcache) ? dcache->getInterface() : NULL;
    params->width = width;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->mem = mem;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)