timing.cc revision 9180
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->defer_registration) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this)
{
    _status = Idle;

    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        DPRINTF(Drain, "CPU not drained\n");
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += curCycle() - previousCycle;

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousCycle = curCycle();
}


void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

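// Send a pending data read to the dcache. Memory-mapped IPR reads are
// handled locally via a scheduled IprEvent; other packets are handed to the
// dcache port. Returns true once the packet has been handed off, false if it
// must be retried later.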
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

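// Called from finishTranslation() for a non-split access once translation is
// done: build the packet, then either complete it immediately (NO_ACCESS
// requests), issue the read, or perform LL/SC and conditional-swap
// bookkeeping before issuing the write.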
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // set to false to suppress the cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

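// Split-access counterpart of sendData(): issue the two fragment packets in
// order, clearing each fragment from its parent (main) packet as it is
// accepted, and stopping as soon as one of them has to be retried.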
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd);
}

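// Build the two fragment packets plus a main packet that aggregates them.
// The fragments share the main packet's data buffer, and sender-state
// objects link each fragment back to the main packet so completeDataAccess()
// can tell when both halves have returned.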
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(),
                 dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

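// Initiate a timing-mode read: create the request, split it in two if it
// crosses a cache-line boundary, and start the (possibly split) timing
// translation. The access itself is issued later from finishTranslation().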
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

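// Counterpart of handleReadPacket() for the pending write held in
// dcache_pkt: handle memory-mapped IPR writes locally, otherwise send the
// packet to the dcache, keeping it around if a retry is needed.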
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

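// Initiate a timing-mode write: copy the store data into a new buffer, build
// the request (splitting it across a cache-line boundary if needed) and
// start the timing translation, mirroring readMem() above.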
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


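// Callback invoked once the (possibly split) address translation has
// finished: on a fault, clean up and redirect to the fault handler;
// otherwise hand the access to sendData()/sendSplitData().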
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


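// Start fetching the next instruction: check for interrupts and PC events,
// then either begin the timing ITB translation for an icache fetch or, when
// executing from the microcode ROM or inside a macroop, skip the icache and
// go straight to completeIfetch().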
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}


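// Fetch-translation callback: if the translation succeeded, send the
// instruction read to the icache (waiting for a retry if the port is busy);
// on a fault, skip the fetch and advance straight to the fault handler.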
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}


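// Move on to the next instruction. A fault redirects the PC to the fault
// handler and reschedules the fetch event; otherwise the PC is advanced
// (unless stayAtPC is set) and the next fetch is started while the CPU
// remains in the Running state.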
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


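// Handle an icache response (pkt may be NULL for fetches that bypass the
// icache): execute the fetched instruction, either initiating a memory
// access for loads/stores or executing it completely, then advance to the
// next instruction.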
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

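// Handle a dcache response: for split accesses, wait until both fragments
// have returned before treating the main packet as complete, then finish the
// memory instruction (completeAcc), update LL/SC state for load-locked
// requests, and advance to the next instruction.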
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

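// Delay processing of a dcache response until the next CPU clock edge. If a
// second split-access fragment arrives while another response is already
// queued for that edge, reject it and schedule a retry instead.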
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->nextCycle();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

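// The dcache can now accept the packet we were holding on to: resend it,
// and for split accesses also try to send the other outstanding fragment.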
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}