// timing.cc revision 9424
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
64void
65TimingSimpleCPU::init()
66{
67    BaseCPU::init();
68
69    if (!params()->defer_registration &&
70        system->getMemoryMode() != Enums::timing) {
71        fatal("The timing CPU requires the memory system to be in "
72              "'timing' mode.\n");
73    }
74
75    // Initialise the ThreadContext's memory proxies
76    tcBase()->initMemProxies(tcBase());
77
78    if (FullSystem && !params()->defer_registration) {
79        for (int i = 0; i < threadContexts.size(); ++i) {
80            ThreadContext *tc = threadContexts[i];
81            // initialize CPU, including PC
82            TheISA::initCPU(tc, _cpuId);
83        }
84    }
85}
86
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    // Remember the packet to be processed and schedule this event on
    // the owning CPU's event queue for tick t.
    pkt = _pkt;
    cpu->schedule(this, t);
}
93
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this)
{
    // The CPU starts idle; activateContext() kicks off execution.
    _status = Idle;

    setDrainState(Drainable::Running);
    // NOTE(review): this zeroes the system-wide instruction counter on
    // every TimingSimpleCPU construction -- presumably harmless at
    // start-up, but worth confirming for multi-CPU configurations.
    system->totalNumInsts = 0;
}
104
105
TimingSimpleCPU::~TimingSimpleCPU()
{
    // Nothing to release beyond what base-class and member
    // destructors already clean up.
}
109
void
TimingSimpleCPU::serialize(ostream &os)
{
    // Checkpoint the drain state first, then the base-class state, so
    // unserialize() can read them back in the same order.
    Drainable::State so_state(getDrainState());
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}
117
void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    // Read back the drain state written by serialize(); the value is
    // consumed only by the UNSERIALIZE_ENUM macro here.
    Drainable::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}
125
126unsigned int
127TimingSimpleCPU::drain(DrainManager *drain_manager)
128{
129    // TimingSimpleCPU is ready to drain if it's not waiting for
130    // an access to complete.
131    if (_status == Idle ||
132        _status == BaseSimpleCPU::Running ||
133        _status == SwitchedOut) {
134        setDrainState(Drainable::Drained);
135        return 0;
136    } else {
137        setDrainState(Drainable::Draining);
138        drainManager = drain_manager;
139        DPRINTF(Drain, "CPU not drained\n");
140        return 1;
141    }
142}
143
144void
145TimingSimpleCPU::drainResume()
146{
147    DPRINTF(SimpleCPU, "Resume\n");
148    if (_status != SwitchedOut && _status != Idle) {
149        if (system->getMemoryMode() != Enums::timing) {
150            fatal("The timing CPU requires the memory system to be in "
151                  "'timing' mode.\n");
152        }
153
154        if (fetchEvent.scheduled())
155           deschedule(fetchEvent);
156
157        schedule(fetchEvent, nextCycle());
158    }
159
160    setDrainState(Drainable::Running);
161}
162
void
TimingSimpleCPU::switchOut()
{
    // Switching out is only legal when no memory access is in flight.
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    _status = SwitchedOut;
    // Account for the cycles elapsed since the last status update.
    numCycles += curCycle() - previousCycle;

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}
175
176
177void
178TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
179{
180    BaseCPU::takeOverFrom(oldCPU);
181
182    // if any of this CPU's ThreadContexts are active, mark the CPU as
183    // running and schedule its tick event.
184    for (int i = 0; i < threadContexts.size(); ++i) {
185        ThreadContext *tc = threadContexts[i];
186        if (tc->status() == ThreadContext::Active &&
187            _status != BaseSimpleCPU::Running) {
188            _status = BaseSimpleCPU::Running;
189            break;
190        }
191    }
192
193    if (_status != BaseSimpleCPU::Running) {
194        _status = Idle;
195    }
196    assert(threadContexts.size() == 1);
197    previousCycle = curCycle();
198}
199
200
void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // This CPU model supports a single hardware thread only.
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}
217
218
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    // Single hardware thread only.
    assert(thread_num == 0);
    assert(thread);

    // Nothing to do if the CPU is already idle.
    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}
238
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    // Issue a read to the data memory system.  Returns true when the
    // packet has been handed off (i.e. no retry is pending).
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR reads bypass the cache: handle the access
        // locally and schedule the response after the modelled delay.
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        // Port is busy: hold on to the packet until recvRetry().
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
258
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    // Build and issue a (non-split) data access once translation has
    // completed.  'data' ownership passes to the packet.
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required (e.g. a suppressed access): fake a
        // response and complete immediately.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // A failed store-conditional suppresses the cache access.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            // Conditional swap carries its comparison value along.
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Suppressed write: complete locally as if it responded.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
291
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    // Issue a cache-line-crossing access as two fragment packets.
    // Each fragment's sender state tracks whether it has been accepted
    // by the memory system; completeDataAccess() reassembles them.
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: fake a response on the first
        // (only) packet and complete immediately.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        // Only attempt the second fragment once the first is accepted;
        // clearFromParent() marks a fragment as no longer pending.
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        // Same ordering for writes: second fragment only after the
        // first has been accepted.
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}
328
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // Handle a fault reported by the address translation of a data
    // access: account for elapsed cycles, drop tracing for the faulting
    // instruction, and advance past it (honouring a pending drain).
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getDrainState() == Drainable::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}
352
353void
354TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
355{
356    MemCmd cmd;
357    if (read) {
358        cmd = MemCmd::ReadReq;
359        if (req->isLLSC())
360            cmd = MemCmd::LoadLockedReq;
361    } else {
362        cmd = MemCmd::WriteReq;
363        if (req->isLLSC()) {
364            cmd = MemCmd::StoreCondReq;
365        } else if (req->isSwap()) {
366            cmd = MemCmd::SwapReq;
367        }
368    }
369    pkt = new Packet(req, cmd);
370}
371
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    // Build two fragment packets for a line-crossing access, plus a
    // "big" parent packet that represents the whole access.  The
    // sender-state objects link fragments to the parent.
    pkt1 = pkt2 = NULL;

    // Memory-mapped IPR accesses must never be split.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No actual access will be performed; a single packet on the
        // original request is sufficient.
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the parent request a physical address so the parent packet
    // can be constructed; it is a response-only placeholder.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    // Parent owns the data buffer; fragments alias into it.
    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
404
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    // Initiate a timing-mode read: start the (possibly split) address
    // translation and return.  The access itself is issued from
    // finishTranslation(); translation faults are reported there too,
    // so this function always returns NoFault.
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    // An access that extends past the end of its cache block must be
    // split at the block boundary into two requests.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // The WholeTranslationState collects both fragment
        // translations before finishTranslation() is invoked.
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}
452
bool
TimingSimpleCPU::handleWritePacket()
{
    // Issue the pending write in dcache_pkt to the data memory system.
    // Returns true when the packet has been handed off (no retry
    // pending).  Mirrors handleReadPacket() for the write path.
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR writes bypass the cache: handle locally
        // and schedule the response after the modelled delay.
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        // Port is busy: keep dcache_pkt and wait for recvRetry().
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
471
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Initiate a timing-mode write: copy the caller's data (the access
    // completes asynchronously), start the (possibly split) address
    // translation, and return.  The access itself is issued from
    // finishTranslation().
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    // An access that extends past the end of its cache block must be
    // split at the block boundary into two requests.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
521
522
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    // Callback invoked once the (possibly split) address translation
    // of a data access has completed: either report the fault or issue
    // the access.  Consumes 'state' in all cases.
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        // Faulting prefetches are silently dropped (fault suppressed).
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}
547
548
void
TimingSimpleCPU::fetch()
{
    // Begin fetching the next instruction: check interrupts and PC
    // events, then either start a timing itb translation or, for
    // microcode/ROM instructions, skip straight to completeIfetch().
    DPRINTF(SimpleCPU, "Fetch\n");

    // Don't take interrupts in the middle of a delayed-commit
    // instruction sequence.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // Microcode from the ROM or an in-progress macroop needs no
    // instruction memory access.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        // sendFetch() is invoked via fetchTranslation when the
        // translation completes.
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
582
583
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    // Callback invoked when the itb translation of a fetch completes:
    // send the fetch packet to the icache, or handle the fetch fault.
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // The fetched instruction bits land directly in 'inst'.
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    // Account for the cycles elapsed since the last status update.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}
614
615
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Move on to the next instruction: invoke the fault handler if one
    // occurred, otherwise advance the PC and (if still running) start
    // the next fetch.

    // A fault is already being processed; wait for its fetch event.
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        // advancePC() invokes the fault; fetching restarts from the
        // fault handler on the next cycle.
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    // stayAtPC is set while a macroop/microcode sequence is executing.
    if (!stayAtPC)
        advancePC(fault);

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}
642
643
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    // Execute the instruction whose fetch just completed.  pkt is NULL
    // for microcode/ROM instructions that required no icache access
    // (see fetch()); when non-NULL, pkt (and its request) are deleted
    // here.
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    // A pending drain completes here instead of executing further.
    if (getDrainState() == Drainable::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // No decoded instruction (e.g. mid-macroop): just advance.
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
720
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: hand the packet to the CPU now that
    // the next clock edge has arrived.
    cpu->completeIfetch(pkt);
}
726
727bool
728TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
729{
730    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
731    // delay processing of returned data until next CPU clock edge
732    Tick next_tick = cpu->nextCycle();
733
734    if (next_tick == curTick())
735        cpu->completeIfetch(pkt);
736    else
737        tickEvent.schedule(pkt, next_tick);
738
739    return true;
740}
741
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // A retry is only expected while a fetch packet is waiting to be
    // transmitted.
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);

    PacketPtr pkt = cpu->ifetch_pkt;
    if (!sendTimingReq(pkt))
        return;  // still blocked; wait for the next retry

    // The memory system now owns the packet.
    cpu->_status = IcacheWaitResponse;
    cpu->ifetch_pkt = NULL;
}
755
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is one fragment of a split access; free it and only
        // proceed once both fragments have responded, at which point
        // the parent ("big") packet stands in for the whole access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting for the other fragment.
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    // A pending drain completes here instead of fetching further.
    if (getDrainState() == Drainable::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}
826
827
void
TimingSimpleCPU::completeDrain()
{
    // The last outstanding access has finished: mark the CPU drained
    // and notify the drain manager recorded in drain().
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    setDrainState(Drainable::Drained);
    drainManager->signalDrainDone();
}
835
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            // Reject this response; the cache will re-send it when we
            // issue the retry.
            return false;
        }
    }

    return true;
}
859
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: hand the packet to the CPU now that
    // the next clock edge has arrived.
    cpu->completeDataAccess(pkt);
}
865
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // Issue the remaining fragment through the read/write
                // handler matching the parent packet's direction.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
909
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-scheduling event: models the latency of a memory-mapped
    // IPR access by delivering the response at tick t.
    cpu->schedule(this, t);
}
916
void
TimingSimpleCPU::IprEvent::process()
{
    // The modelled IPR delay has elapsed; deliver the response.
    cpu->completeDataAccess(pkt);
}
922
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name for tracing/debug output.
    return "Timing Simple CPU Delay IPR event";
}
928
929
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: forward the address to the dcache port's printer.
    dcachePort.printAddr(a);
}
935
936
937////////////////////////////////////////////////////////////////////////
938//
939//  TimingSimpleCPU Simulation Object
940//
941TimingSimpleCPU *
942TimingSimpleCPUParams::create()
943{
944    numThreads = 1;
945    if (!FullSystem && workload.size() != 1)
946        panic("only one workload allowed");
947    return new TimingSimpleCPU(this);
948}
949