timing.cc revision 8706
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick();
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
    dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
           deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick() - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick();
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

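// Attempt to issue a timing-mode read to the dcache. Memory-mapped IPR
// accesses (internal processor registers) are handled locally and completed
// later via an IprEvent instead of going out on the port. Returns true if
// the CPU no longer owns the packet (it was sent or handled), false if the
// dcache asked for a retry.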
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

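// Issue a single (non-split) access once translation has completed. The
// packet takes ownership of the dynamically allocated data buffer. For
// writes, locked (store-conditional) and conditional-swap requests are
// checked first; a failed store-conditional skips the cache and is
// completed immediately.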
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

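// Issue both halves of an access that straddles a cache block boundary.
// Each fragment goes through handleReadPacket()/handleWritePacket(); when a
// fragment is accepted its entry is cleared from the parent packet's sender
// state so recvRetry() knows which fragments are still pending.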
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

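// Build the two fragment packets of a split access plus a "main" packet
// spanning the whole request. The fragments carry static pointers into the
// main packet's data buffer; SplitMainSenderState and
// SplitFragmentSenderState link the three packets so the two responses can
// be recombined in completeDataAccess().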
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

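// Start a timing read: build the request, decide whether it crosses a cache
// block boundary, and kick off the (possibly split) TLB translation. The
// access itself is issued later from finishTranslation()/sendData().
//
// Example of the split check (assuming, purely for illustration, a 64-byte
// block): an 8-byte read at address 0x3c gives
//     split_addr = roundDown(0x3c + 8 - 1, 64) = 0x40 > 0x3c,
// so the access straddles the 0x40 boundary and is broken into two requests
// that are translated independently.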
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

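// Counterpart of handleReadPacket() for stores: dcache_pkt must already hold
// the write packet. Memory-mapped IPR writes are completed through a delayed
// IprEvent; otherwise the packet is sent to the dcache. Returns true once
// the memory system (or the IPR path) owns the packet.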
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

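// Start a timing write. The caller's data is copied into a freshly allocated
// buffer so the store can complete asynchronously after translation;
// otherwise the flow mirrors readMem() above, including the split handling
// for block-straddling stores.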
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


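// Called by the translation machinery once all pieces of a (possibly split)
// translation are done. On a fault the data buffer and requests are freed
// and the fault is handled; otherwise the access is handed to sendData() or
// sendSplitData().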
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


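// Begin fetching the next instruction. Microcode fetched from the ROM and
// the remaining micro-ops of an in-flight macro-op need no new memory
// access, so in those cases completeIfetch() is invoked directly with a
// NULL packet.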
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick() - previousTick);
        previousTick = curTick();
    }
}


void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();
}


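// Advance past the instruction that just completed and, if the CPU is still
// Running, start the next fetch. A fault advances the PC to the fault
// handler, reschedules the fetch event for the next cycle, and marks the
// CPU as Faulting until that event fires.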
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick())
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    } else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

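// Called when a dcache response (or IPR completion) arrives. For split
// accesses the fragments are reaped one at a time; only when both halves
// have returned is the "big" packet reconstituted and the instruction's
// completeAcc() run against it.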
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick()) {
            cpu->completeDataAccess(pkt);
        } else {
            if (!tickEvent.scheduled()) {
                tickEvent.schedule(pkt, next_tick);
            } else {
                // In the case of a split transaction and a cache that is
                // faster than a CPU we could get two responses before
                // next_tick expires
                if (!retryEvent.scheduled())
                    schedule(retryEvent, next_tick);
                return false;
            }
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

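// Event used to model the latency of memory-mapped IPR accesses: it simply
// completes the data access after the requested delay.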
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
