timing.cc revision 9433:34971d2e0019
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
64void
65TimingSimpleCPU::init()
66{
67    BaseCPU::init();
68
69    if (!params()->switched_out &&
70        system->getMemoryMode() != Enums::timing) {
71        fatal("The timing CPU requires the memory system to be in "
72              "'timing' mode.\n");
73    }
74
75    // Initialise the ThreadContext's memory proxies
76    tcBase()->initMemProxies(tcBase());
77
78    if (FullSystem && !params()->switched_out) {
79        for (int i = 0; i < threadContexts.size(); ++i) {
80            ThreadContext *tc = threadContexts[i];
81            // initialize CPU, including PC
82            TheISA::initCPU(tc, _cpuId);
83        }
84    }
85}
86
// Stash the packet to be delivered later and queue this event on the
// owning CPU's event queue at tick t.
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}
93
94TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
95    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
96      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
97      fetchEvent(this)
98{
99    _status = Idle;
100
101    setDrainState(Drainable::Running);
102    system->totalNumInsts = 0;
103}
104
105
// Nothing to tear down explicitly; owned state is released by the base
// classes.
TimingSimpleCPU::~TimingSimpleCPU()
{
}
109
// Checkpoint the drain state followed by the base-CPU state.
// NOTE(review): the local must stay named so_state — SERIALIZE_ENUM
// presumably keys the checkpoint entry on the variable name; confirm
// before renaming.
void
TimingSimpleCPU::serialize(ostream &os)
{
    Drainable::State so_state(getDrainState());
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}
117
// Restore the drain state recorded by serialize(), then the base-CPU
// state.  The restored so_state value itself is not consulted here.
void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    Drainable::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}
125
126unsigned int
127TimingSimpleCPU::drain(DrainManager *drain_manager)
128{
129    // TimingSimpleCPU is ready to drain if it's not waiting for
130    // an access to complete.
131    if (_status == Idle ||
132        _status == BaseSimpleCPU::Running ||
133        _status == SwitchedOut) {
134        setDrainState(Drainable::Drained);
135        return 0;
136    } else {
137        setDrainState(Drainable::Draining);
138        drainManager = drain_manager;
139        DPRINTF(Drain, "CPU not drained\n");
140        return 1;
141    }
142}
143
144void
145TimingSimpleCPU::drainResume()
146{
147    DPRINTF(SimpleCPU, "Resume\n");
148    if (_status != SwitchedOut && _status != Idle) {
149        if (system->getMemoryMode() != Enums::timing) {
150            fatal("The timing CPU requires the memory system to be in "
151                  "'timing' mode.\n");
152        }
153
154        if (fetchEvent.scheduled())
155           deschedule(fetchEvent);
156
157        schedule(fetchEvent, nextCycle());
158    }
159
160    setDrainState(Drainable::Running);
161}
162
163void
164TimingSimpleCPU::switchOut()
165{
166    BaseSimpleCPU::switchOut();
167
168    assert(_status == BaseSimpleCPU::Running || _status == Idle);
169    _status = SwitchedOut;
170    numCycles += curCycle() - previousCycle;
171
172    // If we've been scheduled to resume but are then told to switch out,
173    // we'll need to cancel it.
174    if (fetchEvent.scheduled())
175        deschedule(fetchEvent);
176}
177
178
179void
180TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
181{
182    BaseSimpleCPU::takeOverFrom(oldCPU);
183
184    // if any of this CPU's ThreadContexts are active, mark the CPU as
185    // running and schedule its tick event.
186    for (int i = 0; i < threadContexts.size(); ++i) {
187        ThreadContext *tc = threadContexts[i];
188        if (tc->status() == ThreadContext::Active &&
189            _status != BaseSimpleCPU::Running) {
190            _status = BaseSimpleCPU::Running;
191            break;
192        }
193    }
194
195    if (_status != BaseSimpleCPU::Running) {
196        _status = Idle;
197    }
198    assert(threadContexts.size() == 1);
199    previousCycle = curCycle();
200}
201
202
203void
204TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
205{
206    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
207
208    assert(thread_num == 0);
209    assert(thread);
210
211    assert(_status == Idle);
212
213    notIdleFraction++;
214    _status = BaseSimpleCPU::Running;
215
216    // kick things off by initiating the fetch of the next instruction
217    schedule(fetchEvent, clockEdge(delay));
218}
219
220
221void
222TimingSimpleCPU::suspendContext(ThreadID thread_num)
223{
224    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
225
226    assert(thread_num == 0);
227    assert(thread);
228
229    if (_status == Idle)
230        return;
231
232    assert(_status == BaseSimpleCPU::Running);
233
234    // just change status to Idle... if status != Running,
235    // completeInst() will not initiate fetch of next instruction.
236
237    notIdleFraction--;
238    _status = Idle;
239}
240
241bool
242TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
243{
244    RequestPtr req = pkt->req;
245    if (req->isMmappedIpr()) {
246        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
247        new IprEvent(pkt, this, clockEdge(delay));
248        _status = DcacheWaitResponse;
249        dcache_pkt = NULL;
250    } else if (!dcachePort.sendTimingReq(pkt)) {
251        _status = DcacheRetry;
252        dcache_pkt = pkt;
253    } else {
254        _status = DcacheWaitResponse;
255        // memory system takes ownership of packet
256        dcache_pkt = NULL;
257    }
258    return dcache_pkt == NULL;
259}
260
// Build and issue the packet for a (non-split) data access whose
// translation has completed.
//
// @param req  Translated memory request.
// @param data Heap-allocated data buffer; ownership passes to the
//             packet via dataDynamicArray().
// @param res  Result pointer for conditional-swap accesses; may be NULL
//             otherwise.
// @param read True for a load, false for a store.
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    // Packet takes ownership of the data buffer.
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access is required: fake an immediate response.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // A store-conditional only accesses memory if the lock
            // check succeeds.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Failed store-conditional: complete locally without
            // touching the cache.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
293
// Issue both halves of a split (block-crossing) access once translation
// of both fragments has finished.  req1/req2 are the translated
// fragment requests; req is the original whole-access request.
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: only pkt1 was built; fake an
        // immediate response on it.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        // Send the first fragment; only if it was accepted immediately
        // do we attempt the second (otherwise recvRetry() continues).
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        // Same structure as the read case, but handleWritePacket()
        // operates on the dcache_pkt member rather than a parameter.
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}
330
331void
332TimingSimpleCPU::translationFault(Fault fault)
333{
334    // fault may be NoFault in cases where a fault is suppressed,
335    // for instance prefetches.
336    numCycles += curCycle() - previousCycle;
337    previousCycle = curCycle();
338
339    if (traceData) {
340        // Since there was a fault, we shouldn't trace this instruction.
341        delete traceData;
342        traceData = NULL;
343    }
344
345    postExecute();
346
347    if (getDrainState() == Drainable::Draining) {
348        advancePC(fault);
349        completeDrain();
350    } else {
351        advanceInst(fault);
352    }
353}
354
355void
356TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
357{
358    MemCmd cmd;
359    if (read) {
360        cmd = MemCmd::ReadReq;
361        if (req->isLLSC())
362            cmd = MemCmd::LoadLockedReq;
363    } else {
364        cmd = MemCmd::WriteReq;
365        if (req->isLLSC()) {
366            cmd = MemCmd::StoreCondReq;
367        } else if (req->isSwap()) {
368            cmd = MemCmd::SwapReq;
369        }
370    }
371    pkt = new Packet(req, cmd);
372}
373
// Build the packets for a block-crossing access: one fragment packet
// per translated request plus a "main" packet that carries the shared
// data buffer and the fragment bookkeeping in its sender state.
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    // Memory-mapped IPR accesses are never split.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access: a single packet on the whole request
        // suffices; pkt2 stays NULL.
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the main request a physical address (that of the first
    // fragment) so the main packet can be constructed as a response.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    // The main packet owns the buffer; the fragments alias into it.
    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    // Link fragments to the main packet so responses can be matched up.
    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
406
407Fault
408TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
409                         unsigned size, unsigned flags)
410{
411    Fault fault;
412    const int asid = 0;
413    const ThreadID tid = 0;
414    const Addr pc = thread->instAddr();
415    unsigned block_size = dcachePort.peerBlockSize();
416    BaseTLB::Mode mode = BaseTLB::Read;
417
418    if (traceData) {
419        traceData->setAddr(addr);
420    }
421
422    RequestPtr req  = new Request(asid, addr, size,
423                                  flags, dataMasterId(), pc, _cpuId, tid);
424
425    Addr split_addr = roundDown(addr + size - 1, block_size);
426    assert(split_addr <= addr || split_addr - addr < block_size);
427
428    _status = DTBWaitResponse;
429    if (split_addr > addr) {
430        RequestPtr req1, req2;
431        assert(!req->isLLSC() && !req->isSwap());
432        req->splitOnVaddr(split_addr, req1, req2);
433
434        WholeTranslationState *state =
435            new WholeTranslationState(req, req1, req2, new uint8_t[size],
436                                      NULL, mode);
437        DataTranslation<TimingSimpleCPU *> *trans1 =
438            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
439        DataTranslation<TimingSimpleCPU *> *trans2 =
440            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
441
442        thread->dtb->translateTiming(req1, tc, trans1, mode);
443        thread->dtb->translateTiming(req2, tc, trans2, mode);
444    } else {
445        WholeTranslationState *state =
446            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
447        DataTranslation<TimingSimpleCPU *> *translation
448            = new DataTranslation<TimingSimpleCPU *>(this, state);
449        thread->dtb->translateTiming(req, tc, translation, mode);
450    }
451
452    return NoFault;
453}
454
455bool
456TimingSimpleCPU::handleWritePacket()
457{
458    RequestPtr req = dcache_pkt->req;
459    if (req->isMmappedIpr()) {
460        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
461        new IprEvent(dcache_pkt, this, clockEdge(delay));
462        _status = DcacheWaitResponse;
463        dcache_pkt = NULL;
464    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
465        _status = DcacheRetry;
466    } else {
467        _status = DcacheWaitResponse;
468        // memory system takes ownership of packet
469        dcache_pkt = NULL;
470    }
471    return dcache_pkt == NULL;
472}
473
// Initiate a timing-mode store: copy the data, allocate the request,
// and start (possibly split) address translation.  The access itself is
// issued from finishTranslation(), and translation faults are delivered
// there too, so this function always returns NoFault.
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Copy the store data; the translation state owns this buffer from
    // here on.
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // An access that extends past the end of its cache block must be
    // split into two fragments.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        // Translate the two fragments independently;
        // finishTranslation() fires once both have completed.
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
523
524
// Callback invoked once all fragments of a data access have been
// translated (or one has faulted); issues the access or reports the
// fault.  Always deletes the translation state.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        // Faults on prefetches are suppressed: after setNoFault() the
        // value handed to translationFault() below is NoFault.
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        // Issue the access; the data buffer's ownership passes to the
        // packet(s) built inside sendData()/sendSplitData().
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}
549
550
// Start fetching the next instruction: check interrupts and PC events,
// then either begin a timing itb translation or, when executing from
// the microcode ROM or inside a macro-op, skip the icache access.
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Don't take interrupts in the middle of a delayed-commit
    // instruction.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while executing out of the microcode
    // ROM or in the middle of a macro-op.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        // sendFetch() is called back through fetchTranslation when the
        // translation completes.
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // "Fetch" completes immediately with no packet.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
584
585
586void
587TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
588{
589    if (fault == NoFault) {
590        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
591                req->getVaddr(), req->getPaddr());
592        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
593        ifetch_pkt->dataStatic(&inst);
594        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
595
596        if (!icachePort.sendTimingReq(ifetch_pkt)) {
597            // Need to wait for retry
598            _status = IcacheRetry;
599        } else {
600            // Need to wait for cache to respond
601            _status = IcacheWaitResponse;
602            // ownership of packet transferred to memory system
603            ifetch_pkt = NULL;
604        }
605    } else {
606        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
607        delete req;
608        // fetch fault: advance directly to next instruction (fault handler)
609        _status = BaseSimpleCPU::Running;
610        advanceInst(fault);
611    }
612
613    numCycles += curCycle() - previousCycle;
614    previousCycle = curCycle();
615}
616
617
618void
619TimingSimpleCPU::advanceInst(Fault fault)
620{
621
622    if (_status == Faulting)
623        return;
624
625    if (fault != NoFault) {
626        advancePC(fault);
627        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
628        reschedule(fetchEvent, nextCycle(), true);
629        _status = Faulting;
630        return;
631    }
632
633
634    if (!stayAtPC)
635        advancePC(fault);
636
637    if (_status == BaseSimpleCPU::Running) {
638        // kick off fetch of next instruction... callback from icache
639        // response will cause that instruction to be executed,
640        // keeping the CPU running.
641        fetch();
642    }
643}
644
645
// Execute the instruction whose fetch just completed.  pkt is NULL when
// fetch() short-circuited (microcode ROM / macro-op continuation).
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (getDrainState() == Drainable::Draining) {
        // Don't start a new instruction while draining; free the fetch
        // packet and signal drain completion.
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // preExecute() produced no instruction: just move on.
        advanceInst(NoFault);
    }

    // The CPU owns the fetch packet once its response has arrived.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
722
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: hand the stashed packet to the CPU now.
    cpu->completeIfetch(pkt);
}
728
729bool
730TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
731{
732    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
733    // delay processing of returned data until next CPU clock edge
734    Tick next_tick = cpu->nextCycle();
735
736    if (next_tick == curTick())
737        cpu->completeIfetch(pkt);
738    else
739        tickEvent.schedule(pkt, next_tick);
740
741    return true;
742}
743
744void
745TimingSimpleCPU::IcachePort::recvRetry()
746{
747    // we shouldn't get a retry unless we have a packet that we're
748    // waiting to transmit
749    assert(cpu->ifetch_pkt != NULL);
750    assert(cpu->_status == IcacheRetry);
751    PacketPtr tmp = cpu->ifetch_pkt;
752    if (sendTimingReq(tmp)) {
753        cpu->_status = IcacheWaitResponse;
754        cpu->ifetch_pkt = NULL;
755    }
756}
757
// Complete the load or store whose dcache (or IPR) response just
// arrived.  For split accesses this is called once per fragment and
// only proceeds past the bookkeeping once both fragments are back.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is one fragment of a split access: free the fragment and
        // return early unless it is the last outstanding one.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            // Both fragments are back: complete via the main packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getDrainState() == Drainable::Draining) {
        // Finish this instruction but don't start another while
        // draining.
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}
828
829
// Called once the last outstanding access has finished while draining;
// marks the CPU drained and notifies the DrainManager.
void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    setDrainState(Drainable::Drained);
    drainManager->signalDrainDone();
}
837
// Accept a dcache response, deferring its processing to the next CPU
// clock edge.  Returns false (nack) if a response is already queued for
// that edge, in which case the sender retries after retryEvent fires.
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}
861
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: hand the stashed packet to the CPU now.
    cpu->completeDataAccess(pkt);
}
867
// Retry a previously nacked dcache request.  For split accesses the
// successfully retried fragment is cleared from its parent and the
// remaining fragment, if any, is sent as well.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                // No fragment left to send: wait for the responses.
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
911
// Event that delays completion of a memory-mapped IPR access until the
// handler-supplied latency has elapsed; schedules itself at
// construction time.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}
918
void
TimingSimpleCPU::IprEvent::process()
{
    // Deliver the delayed IPR "response" like a normal dcache response.
    cpu->completeDataAccess(pkt);
}
924
// Human-readable event name used in event-queue debug output.
const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}
930
931
// Debugging aid: delegate address printing to the dcache port.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}
937
938
939////////////////////////////////////////////////////////////////////////
940//
941//  TimingSimpleCPU Simulation Object
942//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // This CPU model supports exactly one hardware thread.
    numThreads = 1;
    // In syscall-emulation mode exactly one workload must be supplied.
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
951