// timing.cc revision 9342
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
64void
65TimingSimpleCPU::init()
66{
67    BaseCPU::init();
68
69    // Initialise the ThreadContext's memory proxies
70    tcBase()->initMemProxies(tcBase());
71
72    if (FullSystem && !params()->defer_registration) {
73        for (int i = 0; i < threadContexts.size(); ++i) {
74            ThreadContext *tc = threadContexts[i];
75            // initialize CPU, including PC
76            TheISA::initCPU(tc, _cpuId);
77        }
78    }
79}
80
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    // Stash the packet so process() can hand it back to the CPU, then
    // put this event on the CPU's event queue at tick t.
    pkt = _pkt;
    cpu->schedule(this, t);
}
87
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this)
{
    // Start out idle; activateContext() moves us to Running and kicks
    // off the first fetch.
    _status = Idle;

    setDrainState(Drainable::Running);
    system->totalNumInsts = 0;
}
98
99
// Nothing to tear down explicitly; all members clean up in their own
// destructors.
TimingSimpleCPU::~TimingSimpleCPU()
{
}
103
void
TimingSimpleCPU::serialize(ostream &os)
{
    // Write the drain state first, then the base-CPU state, so that
    // unserialize() can consume the fields in the same order.
    Drainable::State so_state(getDrainState());
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}
111
void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    // Read fields back in the order serialize() wrote them.  The drain
    // state is consumed here but not otherwise acted upon.
    Drainable::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}
119
120unsigned int
121TimingSimpleCPU::drain(DrainManager *drain_manager)
122{
123    // TimingSimpleCPU is ready to drain if it's not waiting for
124    // an access to complete.
125    if (_status == Idle ||
126        _status == BaseSimpleCPU::Running ||
127        _status == SwitchedOut) {
128        setDrainState(Drainable::Drained);
129        return 0;
130    } else {
131        setDrainState(Drainable::Draining);
132        drainManager = drain_manager;
133        DPRINTF(Drain, "CPU not drained\n");
134        return 1;
135    }
136}
137
void
TimingSimpleCPU::drainResume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        // We can only resume into a timing-mode memory system.
        assert(system->getMemoryMode() == Enums::timing);

        // Restart instruction fetch on the next clock edge, replacing
        // any previously scheduled fetch event.
        if (fetchEvent.scheduled())
           deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    setDrainState(Drainable::Running);
}
153
void
TimingSimpleCPU::switchOut()
{
    // Only a quiescent CPU (no outstanding accesses) may be switched out.
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    _status = SwitchedOut;
    // Account for cycles elapsed since the last accounting point.
    numCycles += curCycle() - previousCycle;

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}
166
167
168void
169TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
170{
171    BaseCPU::takeOverFrom(oldCPU);
172
173    // if any of this CPU's ThreadContexts are active, mark the CPU as
174    // running and schedule its tick event.
175    for (int i = 0; i < threadContexts.size(); ++i) {
176        ThreadContext *tc = threadContexts[i];
177        if (tc->status() == ThreadContext::Active &&
178            _status != BaseSimpleCPU::Running) {
179            _status = BaseSimpleCPU::Running;
180            break;
181        }
182    }
183
184    if (_status != BaseSimpleCPU::Running) {
185        _status = Idle;
186    }
187    assert(threadContexts.size() == 1);
188    previousCycle = curCycle();
189}
190
191
void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // This model supports exactly one thread.
    assert(thread_num == 0);
    assert(thread);

    // A context may only be activated from the idle state.
    assert(_status == Idle);

    notIdleFraction++;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}
208
209
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    // This model supports exactly one thread.
    assert(thread_num == 0);
    assert(thread);

    // Already idle: nothing to do.
    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}
229
// Try to issue a timing read.  Returns true if the packet has been handed
// off (to the memory system or the IPR pseudo-device), false if the port
// was busy and we must wait for recvRetry().
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR reads bypass the cache: service locally and
        // schedule completion after the IPR access delay.
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        // Port busy: hold the packet until the port signals a retry.
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
249
// Build and issue a (non-split) data access.  `data` is dynamically
// allocated by the caller; the packet takes ownership of it.  `res` is
// only consulted for conditional-swap requests.
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No actual memory access (e.g. suppressed/no-op request):
        // fabricate a response and complete immediately.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // Store-conditional: the ISA decides whether the write
            // actually reaches memory.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Failed store-conditional: complete without touching memory.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
282
283void
284TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
285                               RequestPtr req, uint8_t *data, bool read)
286{
287    PacketPtr pkt1, pkt2;
288    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
289    if (req->getFlags().isSet(Request::NO_ACCESS)) {
290        assert(!dcache_pkt);
291        pkt1->makeResponse();
292        completeDataAccess(pkt1);
293    } else if (read) {
294        SplitFragmentSenderState * send_state =
295            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
296        if (handleReadPacket(pkt1)) {
297            send_state->clearFromParent();
298            send_state = dynamic_cast<SplitFragmentSenderState *>(
299                    pkt2->senderState);
300            if (handleReadPacket(pkt2)) {
301                send_state->clearFromParent();
302            }
303        }
304    } else {
305        dcache_pkt = pkt1;
306        SplitFragmentSenderState * send_state =
307            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
308        if (handleWritePacket()) {
309            send_state->clearFromParent();
310            dcache_pkt = pkt2;
311            send_state = dynamic_cast<SplitFragmentSenderState *>(
312                    pkt2->senderState);
313            if (handleWritePacket()) {
314                send_state->clearFromParent();
315            }
316        }
317    }
318}
319
// Handle a fault reported by the data TLB during a timing translation.
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getDrainState() == Drainable::Draining) {
        // A drain is pending: finish this instruction and signal done
        // instead of fetching the next one.
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}
343
344void
345TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
346{
347    MemCmd cmd;
348    if (read) {
349        cmd = MemCmd::ReadReq;
350        if (req->isLLSC())
351            cmd = MemCmd::LoadLockedReq;
352    } else {
353        cmd = MemCmd::WriteReq;
354        if (req->isLLSC()) {
355            cmd = MemCmd::StoreCondReq;
356        } else if (req->isSwap()) {
357            cmd = MemCmd::SwapReq;
358        }
359    }
360    pkt = new Packet(req, cmd);
361}
362
// Build the two fragment packets (pkt1/pkt2) plus a "big" virtual packet
// that collects their responses, for an access that crosses a cache-line
// boundary.  `data` is shared: the big packet owns it, the fragments
// reference slices of it.
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    // IPR accesses must never be split across a line boundary.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No real access: a single packet on the main request suffices.
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the main request the low fragment's physical address so the
    // big packet looks like a normal (response-only) access.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    // Link the fragments to the big packet via sender-state objects so
    // completeDataAccess() can tell when both responses have arrived.
    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
395
// Initiate a timing read of `size` bytes at virtual address `addr`.
// Starts the (possibly split) TLB translation; the access itself is
// issued from finishTranslation().
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    // If the last byte lands in a different cache block than the first,
    // the access must be split into two requests.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // One shared translation state; each half gets its own
        // translation callback (index 0 = low, 1 = high).
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
443
// Try to issue the pending write in dcache_pkt.  Returns true if the
// packet has been handed off, false if the port was busy and we must
// wait for recvRetry().
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR writes bypass the cache: service locally and
        // schedule completion after the IPR access delay.
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        // Port busy: keep dcache_pkt and wait for the retry callback.
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
462
// Initiate a timing write of `size` bytes at virtual address `addr`.
// The data is copied into a fresh buffer owned by the translation
// state/packet; the access itself is issued from finishTranslation().
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // If the last byte lands in a different cache block than the first,
    // the access must be split into two requests.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // One shared translation state; each half gets its own
        // translation callback (index 0 = low, 1 = high).
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
512
513
// Callback from the data TLB once a (whole, possibly two-part)
// translation has completed: either report the fault or issue the access.
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        // Faulting prefetches are silently dropped.
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        // The access never happens, so free the data buffer and the
        // request(s) here.
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}
538
539
// Start fetching the next instruction: check interrupts/PC events, then
// either kick off an ITLB translation (normal fetch) or, for microcode,
// go straight to completeIfetch() with no memory access.
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Don't take interrupts in the middle of a delayed-commit sequence.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No icache access is needed when executing out of the microcode ROM
    // or continuing a macro-op.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // Microcode: no packet, complete the "fetch" immediately.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
573
574
// Callback from the ITLB: on success, send the fetch packet to the
// icache; on a translation fault, skip the fetch and let advanceInst()
// invoke the fault handler.
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // The fetched bits land directly in the CPU's `inst` buffer.
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}
605
606
// Move on to the next instruction: handle any fault, advance the PC,
// and start the next fetch if the CPU is still running.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Already handling a fault; don't start anything new.
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        // Invoke the fault handler (advancePC redirects the PC) and
        // schedule the fetch of the handler's first instruction.
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    // stayAtPC is set while a macro-op still has micro-ops to issue.
    if (!stayAtPC)
        advancePC(fault);

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}
633
634
// Called when a fetch completes (pkt is NULL for microcode "fetches"
// that made no memory access): decode and execute the instruction.
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (getDrainState() == Drainable::Draining) {
        // A drain is pending: drop the fetched instruction and signal
        // that the CPU is now drained.
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // Nothing decoded (e.g. partial macro-op fetch): just advance.
        advanceInst(NoFault);
    }

    // The fetch packet (and its request) is ours to free.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
711
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deliver the deferred icache response at the CPU clock edge.
    cpu->completeIfetch(pkt);
}
717
718bool
719TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
720{
721    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
722    // delay processing of returned data until next CPU clock edge
723    Tick next_tick = cpu->nextCycle();
724
725    if (next_tick == curTick())
726        cpu->completeIfetch(pkt);
727    else
728        tickEvent.schedule(pkt, next_tick);
729
730    return true;
731}
732
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        // Accepted this time: ownership passes to the memory system.
        // If it fails again we simply keep the packet and wait for the
        // next retry.
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}
746
// Called when a dcache response (or IPR/no-access completion) arrives:
// finish the memory instruction.  For split accesses this is invoked
// once per fragment and only proceeds when both fragments are back.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is a fragment of a split access: free it, and account for
        // it on the parent ("big") packet.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting for the other fragment.
            return;
        } else {
            // Both fragments are in: complete using the big packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getDrainState() == Drainable::Draining) {
        // A drain is pending: finish this instruction and signal done
        // instead of fetching the next one.
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}
817
818
// Mark the CPU drained and notify the drain manager recorded in drain().
void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    setDrainState(Drainable::Drained);
    drainManager->signalDrainDone();
}
826
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            // Refuse the second response; the cache will retry it after
            // retryEvent fires.
            return false;
        }
    }

    return true;
}
850
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deliver the deferred dcache response at the CPU clock edge.
    cpu->completeDataAccess(pkt);
}
856
// The dcache is ready again: re-send the pending packet, and for split
// accesses chase the remaining fragment as well.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            // NOTE(review): `> 0` assumes the pending fragment can only
            // be index 1 (fragments are issued in order, so index 0 is
            // never left pending after a successful resend) — confirm
            // against getPendingFragment()'s "none pending" return value.
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
900
// Self-scheduling event that models the latency of a memory-mapped IPR
// access: fires at tick t and completes the access via the CPU.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}
907
void
TimingSimpleCPU::IprEvent::process()
{
    // IPR delay has elapsed: complete the access like a dcache response.
    cpu->completeDataAccess(pkt);
}
913
// Human-readable event name used by event-queue debug output.
const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}
919
920
// Debug helper: ask the data port to print routing info for address a.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}
926
927
928////////////////////////////////////////////////////////////////////////
929//
930//  TimingSimpleCPU Simulation Object
931//
932TimingSimpleCPU *
933TimingSimpleCPUParams::create()
934{
935    numThreads = 1;
936    if (!FullSystem && workload.size() != 1)
937        panic("only one workload allowed");
938    return new TimingSimpleCPU(this);
939}
940