// timing.cc revision 8779
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
    if (FullSystem) {
        for (int i = 0; i < threadContexts.size(); ++i) {
#if FULL_SYSTEM
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
#endif
        }
    }
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick();
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
    dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick() - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick();
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

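// Try to issue a timing-mode read.  Memory-mapped IPR accesses are
// completed locally via an IprEvent; otherwise the packet is sent to
// the d-cache.  Returns true if the packet was handed off (dcache_pkt
// is cleared) and false if the port requested a retry, in which case
// the packet stays parked in dcache_pkt for recvRetry().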
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

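// Called from finishTranslation() for a single (non-split) access:
// build the packet, attach the dynamic data buffer, and either complete
// it locally (NO_ACCESS requests, or a store-conditional whose lock
// check fails) or send it to the d-cache.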
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // set to false to suppress the cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

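// Split-access counterpart of sendData(): issue both fragment packets,
// stopping early if either one has to wait for a port retry.  Each
// fragment is cleared from the parent packet's sender state once it has
// been successfully handed off.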
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

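// Build the two fragment packets for an access that crosses a cache
// block boundary, plus a "main" packet that owns the data buffer and is
// what completeDataAccess() ultimately operates on.  The fragments
// point back at the main packet through their SplitFragmentSenderState.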
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

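// Initiate a timing-mode load, typically reached from the instruction's
// initiateAcc() (this CPU serves as its execution context).  The access
// is split into two requests if it crosses a cache block boundary; the
// actual d-cache access is started later, from finishTranslation(),
// once the (possibly split) translation completes.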
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


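// Translation callback: once all (one or two) address translations for
// the current access have completed, either report the fault or hand
// the translated request(s) to sendData()/sendSplitData().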
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


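// Start fetching the next instruction.  If we are not executing out of
// the microcode ROM and not in the middle of a macro-op, this kicks off
// an I-TLB translation (which completes in sendFetch()); otherwise
// completeIfetch() is called directly with a null packet.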
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick() - previousTick);
        previousTick = curTick();
    }
}


void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();
}


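// Advance to the next instruction: update the PC (invoking the fault
// handler if a fault was passed in), then start the next fetch if the
// CPU is still Running.  A fault defers the refetch by one cycle and
// parks the CPU in the Faulting state until the rescheduled fetch event
// fires.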
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


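// I-cache response (or a direct call with a null packet for microcode /
// macro-op continuation): decode and execute the fetched instruction.
// Memory references only initiate their access here and finish in
// completeDataAccess(); everything else executes to completion and the
// next fetch is started via advanceInst().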
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

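// Timing response from the I-cache.  Completed fetches are processed on
// the next CPU clock edge (via the port's tick event); nacked packets
// are reinitialized and resent, falling back to the retry path if the
// port is busy.  Snooped coherence requests are ignored.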
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick())
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    } else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

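// D-cache response (or local completion): for split accesses, wait
// until both fragments have returned and then operate on the main
// packet.  Runs the instruction's completeAcc(), handles load-locked
// bookkeeping, and advances to the next instruction.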
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

    if (FullSystem) {
        // Update the ThreadContext's memory ports (Functional/Virtual
        // Ports)
        cpu->tcBase()->connectMemPorts(cpu->tcBase());
    }
}

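// Timing response from the D-cache.  Completed accesses are processed
// on the next CPU clock edge; if the tick event is already busy with an
// earlier fragment of a split access, the response is refused and a
// retry is scheduled instead.  Nacked packets are resent, and snooped
// coherence requests are ignored.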
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick()) {
            cpu->completeDataAccess(pkt);
        } else {
            if (!tickEvent.scheduled()) {
                tickEvent.schedule(pkt, next_tick);
            } else {
                // In the case of a split transaction and a cache that is
                // faster than a CPU we could get two responses before
                // next_tick expires
                if (!retryEvent.scheduled())
                    schedule(retryEvent, next_tick);
                return false;
            }
        }

        return true;
    } else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

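// The D-cache is ready again after refusing an earlier send.  Resend
// the pending packet; for a split access, also try to send the other
// fragment once the first one has gone out.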
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
