timing.cc revision 9165:f9e3dac185ba
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->defer_registration) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
    dcachePort(this), fetchEvent(this)
{
    _status = Idle;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        DPRINTF(Drain, "CPU not drained\n");
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
           deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick() - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick();
}


void
TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

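// Try to send a read packet to the dcache. Mmapped IPR accesses are
// handled locally with a delayed IprEvent; otherwise the packet is
// handed to the dcache port, and a failed send leaves the CPU in
// DcacheRetry with the packet stashed in dcache_pkt. Returns true
// unless the packet is left pending in dcache_pkt for a retry.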
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

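// Issue a single (non-split) translated access: build the packet,
// attach the data buffer, and either complete it immediately (for
// NO_ACCESS requests and suppressed store-conditionals) or send it
// through handleReadPacket()/handleWritePacket().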
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

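// Issue an access that was split across a cache line boundary: send
// both fragment packets, marking each fragment as no longer pending in
// its parent (via SplitFragmentSenderState) so a retry can pick up
// where the send left off.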
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd);
}

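// Build the two fragment packets for a split access, plus a main
// packet that owns the data buffer and is handed back to the
// instruction once both fragments have responded; sender-state objects
// tie the fragments to the main packet. NO_ACCESS requests get a
// single packet instead.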
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

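// Initiate a timing-mode read: construct the request, split it if it
// crosses a cache line, and hand it to the data TLB for translation.
// The access itself is sent from finishTranslation(); any fault is
// reported there as well, so NoFault is always returned here.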
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

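// Counterpart of handleReadPacket() for stores: try to send dcache_pkt
// to the dcache, handling mmapped IPR writes locally and falling back
// to DcacheRetry if the port is busy. Returns true once the packet has
// been handed off.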
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

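// Initiate a timing-mode write: copy the data into a buffer owned by
// the translation state, build (and possibly split) the request, and
// start the data TLB translation. As with readMem(), the actual access
// is issued from finishTranslation().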
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


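// Callback from the data TLB once all pieces of the translation have
// finished. On a fault the buffers and requests are cleaned up and
// translationFault() is invoked; otherwise the access is issued via
// sendData() or sendSplitData().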
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


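// Start fetching the next instruction. Unless we are in the middle of
// a macro-op or executing out of the microcode ROM, this builds a
// fetch request and sends it to the ITLB for timing translation, with
// sendFetch() issuing the actual icache access; otherwise
// completeIfetch(NULL) is invoked directly with no memory access.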
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick() - previousTick);
        previousTick = curTick();
    }
}


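// Called when the fetch translation completes. On success the fetch
// packet is sent to the icache (or queued for retry); on a fault the
// request is dropped and advanceInst() is called with the fault
// instead.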
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();
}


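// Advance to the next instruction. If a fault is pending it is invoked
// through advancePC() and a fresh fetch is scheduled for the next
// cycle while the CPU sits in the Faulting state; otherwise the PC is
// advanced (unless stayAtPC is set) and, if the CPU is still Running,
// the next fetch starts immediately.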
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


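// Icache response callback: execute the instruction that was just
// fetched. Memory references only initiate their access here and are
// finished later in completeDataAccess(); everything else executes and
// retires immediately.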
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch.
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

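// Dcache response callback: finish the outstanding load or store. For
// split accesses the fragments are collected until both have returned,
// then the original instruction's completeAcc() is run and the CPU
// moves on to the next instruction.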
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

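// The dcache asked us to retry: resend the stashed dcache_pkt. If it
// belongs to a split access, a successful send also kicks off the
// remaining fragment (if any) before the CPU goes back to waiting for
// responses.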
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
