timing.cc revision 10464
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}



TimingSimpleCPU::~TimingSimpleCPU()
{
}

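// Drain support: drain() returns 0 when the CPU is already quiescent
// (switched out, idle, or running with nothing in flight) and 1 when work
// is still outstanding, in which case the DrainManager is recorded and
// signalled later from tryCompleteDrain().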
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

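// Send a read packet to the data port. Memory-mapped IPR accesses are
// handled locally via a delayed IprEvent; otherwise the packet goes to the
// cache, and a rejected sendTimingReq leaves it in dcache_pkt for the retry
// path. Returns true unless the packet must wait for a port retry.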
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

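// Issue a single (non-split) access once its translation has completed.
// NO_ACCESS requests complete immediately, reads go through
// handleReadPacket(), and writes first evaluate LLSC/conditional-swap state
// before being sent via handleWritePacket().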
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

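// Issue both fragments of a split (cache-line-crossing) access. Each
// fragment accepted by the port is detached from the main packet's sender
// state; a rejected fragment is left pending and resent from
// DcachePort::recvRetry().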
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    pkt = read ? Packet::createRead(req) : Packet::createWrite(req);
}

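// Build the two fragment packets plus the overarching "main" packet for a
// split access. The fragments point into the main packet's data buffer, and
// their sender-state objects link back to the main packet so the responses
// can be recombined in completeDataAccess().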
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

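// Initiate a timing read: build the request, split it if it crosses a
// cache-line boundary, and kick off the (possibly split) timing translation.
// The access itself is issued from finishTranslation(), which also reports
// any translation fault, so this call always returns NoFault.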
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

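// Initiate a timing write: copy the data into a fresh buffer (zeroed for
// CACHE_BLOCK_ZERO requests), split the request if it crosses a cache-line
// boundary, and start the timing translation. As with readMem(), faults are
// delivered through finishTranslation().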
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


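// Called by the TLB when a (possibly split) timing translation completes.
// On a fault the buffered data and requests are released and the fault is
// handled; otherwise the access is issued via sendData() or sendSplitData().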
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


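// Start fetching the next instruction: check for interrupts and PC events,
// then either begin a timing translation of the fetch address or, for
// microcode (ROM or macro-op) that needs no memory access, fall through to
// completeIfetch() directly.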
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}


void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


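// Handle an instruction-fetch response (pkt may be NULL for microcode that
// required no memory access). Memory-reference instructions only initiate
// their data access here; all other instructions execute to completion and
// the next fetch is kicked off through advanceInst().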
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

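// Handle a data response. For split accesses the fragment is folded back
// into the main packet and nothing further happens until both fragments
// have returned; the instruction is then completed, counted, and the next
// fetch started via advanceInst().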
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}


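// Data responses are processed on the next CPU clock edge. If a second
// response of a split access arrives before that edge, it is refused
// (recvTimingResp returns false) and a retry is scheduled for the same edge
// instead.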
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

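// The cache is ready for a previously rejected request: resend the pending
// packet. For a split access, a successful send also attempts to issue the
// remaining fragment, or hands ownership to the memory system when none is
// left.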
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
