// timing.cc revision 10031
1/*
2 * Copyright (c) 2010-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
64void
65TimingSimpleCPU::init()
66{
67    BaseCPU::init();
68
69    // Initialise the ThreadContext's memory proxies
70    tcBase()->initMemProxies(tcBase());
71
72    if (FullSystem && !params()->switched_out) {
73        for (int i = 0; i < threadContexts.size(); ++i) {
74            ThreadContext *tc = threadContexts[i];
75            // initialize CPU, including PC
76            TheISA::initCPU(tc, _cpuId);
77        }
78    }
79}
80
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    // Stash the packet so process() can hand it back to the CPU, then
    // place this event on the CPU's event queue at tick t.  The packet
    // must be stored before scheduling.
    pkt = _pkt;
    cpu->schedule(this, t);
}
87
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    // A freshly constructed CPU sits idle until activateContext() is
    // called (or until it takes over from another CPU).
    _status = Idle;

    system->totalNumInsts = 0;
}
97
98
99
TimingSimpleCPU::~TimingSimpleCPU()
{
    // Nothing to release here: packets/requests are deleted as they
    // complete, and member objects clean themselves up.
}
103
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    // Only one drain request may be in flight at a time.
    assert(!drainManager);
    if (switchedOut())
        return 0;

    // Already drained if idle, or running but between instructions with
    // no outstanding fetch or memory activity.
    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        // Remember the manager so tryCompleteDrain() can signal it once
        // the CPU reaches a drained state.
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        // One object (this CPU) still needs to drain.
        return 1;
    }
}
128
129void
130TimingSimpleCPU::drainResume()
131{
132    assert(!fetchEvent.scheduled());
133    assert(!drainManager);
134    if (switchedOut())
135        return;
136
137    DPRINTF(SimpleCPU, "Resume\n");
138    verifyMemoryMode();
139
140    assert(!threadContexts.empty());
141    if (threadContexts.size() > 1)
142        fatal("The timing CPU only supports one thread.\n");
143
144    if (thread->status() == ThreadContext::Active) {
145        schedule(fetchEvent, nextCycle());
146        _status = BaseSimpleCPU::Running;
147        notIdleFraction = 1;
148    } else {
149        _status = BaseSimpleCPU::Idle;
150        notIdleFraction = 0;
151    }
152}
153
154bool
155TimingSimpleCPU::tryCompleteDrain()
156{
157    if (!drainManager)
158        return false;
159
160    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
161    if (!isDrained())
162        return false;
163
164    DPRINTF(Drain, "CPU done draining, processing drain event\n");
165    drainManager->signalDrainDone();
166    drainManager = NULL;
167
168    return true;
169}
170
void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    // A CPU being switched out must be quiescent: no pending fetch and
    // no partially executed macro/micro instruction.
    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    // Charge the cycles elapsed since the last accounting point.
    numCycles += curCycle() - previousCycle;
}
183
184
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // Restart cycle accounting from the point at which we take over.
    previousCycle = curCycle();
}
192
193void
194TimingSimpleCPU::verifyMemoryMode() const
195{
196    if (!system->isTimingMode()) {
197        fatal("The timing CPU requires the memory system to be in "
198              "'timing' mode.\n");
199    }
200}
201
202void
203TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
204{
205    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
206
207    assert(thread_num == 0);
208    assert(thread);
209
210    assert(_status == Idle);
211
212    notIdleFraction = 1;
213    _status = BaseSimpleCPU::Running;
214
215    // kick things off by initiating the fetch of the next instruction
216    schedule(fetchEvent, clockEdge(delay));
217}
218
219
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    // Single-threaded model: only thread 0 exists.
    assert(thread_num == 0);
    assert(thread);

    // Already idle: nothing to do.
    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}
239
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR accesses bypass the cache: perform the read
        // now and schedule the "response" after the IPR access delay.
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        // Port is busy: hold on to the packet and wait for recvRetry().
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    // True if the packet was issued, false if we are stalled on a retry.
    return dcache_pkt == NULL;
}
259
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    // The packet takes ownership of the data buffer and frees it when
    // the packet itself is deleted.
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access is actually performed (e.g. a suppressed
        // access): fabricate the response and complete immediately.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // Store-conditional: only perform the access if the lock
            // check succeeds.
            do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            // Conditional swap carries its comparison value along.
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Failed store-conditional: complete without touching memory.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
292
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No actual access: respond to the single packet immediately.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        // Issue the two fragments in order.  clearFromParent() marks a
        // fragment as no longer pending; if a send stalls on a retry,
        // DcachePort::recvRetry() resumes the sequence later.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        // Writes go out via dcache_pkt, one fragment at a time.
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}
329
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    // Hand the fault to advanceInst(), which redirects to the handler
    // (or simply advances if fault == NoFault).
    advanceInst(fault);
}
348
349void
350TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
351{
352    MemCmd cmd;
353    if (read) {
354        cmd = MemCmd::ReadReq;
355        if (req->isLLSC())
356            cmd = MemCmd::LoadLockedReq;
357    } else {
358        cmd = MemCmd::WriteReq;
359        if (req->isLLSC()) {
360            cmd = MemCmd::StoreCondReq;
361        } else if (req->isSwap()) {
362            cmd = MemCmd::SwapReq;
363        }
364    }
365    pkt = new Packet(req, cmd);
366}
367
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    // Memory-mapped IPR accesses must never straddle a block boundary.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No access will be performed, so one packet covering the whole
        // request suffices (pkt2 stays NULL).
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the parent request a physical address (that of the first
    // fragment) so the parent packet can later act as the combined
    // response.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    // Parent packet owns the buffer; the fragments alias slices of it.
    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    // Link the fragments to the parent so completeDataAccess() and
    // recvRetry() can track how many fragments are still outstanding.
    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
400
401Fault
402TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
403                         unsigned size, unsigned flags)
404{
405    Fault fault;
406    const int asid = 0;
407    const ThreadID tid = 0;
408    const Addr pc = thread->instAddr();
409    unsigned block_size = cacheLineSize();
410    BaseTLB::Mode mode = BaseTLB::Read;
411
412    if (traceData) {
413        traceData->setAddr(addr);
414    }
415
416    RequestPtr req  = new Request(asid, addr, size,
417                                  flags, dataMasterId(), pc, _cpuId, tid);
418
419    req->taskId(taskId());
420
421    Addr split_addr = roundDown(addr + size - 1, block_size);
422    assert(split_addr <= addr || split_addr - addr < block_size);
423
424    _status = DTBWaitResponse;
425    if (split_addr > addr) {
426        RequestPtr req1, req2;
427        assert(!req->isLLSC() && !req->isSwap());
428        req->splitOnVaddr(split_addr, req1, req2);
429
430        WholeTranslationState *state =
431            new WholeTranslationState(req, req1, req2, new uint8_t[size],
432                                      NULL, mode);
433        DataTranslation<TimingSimpleCPU *> *trans1 =
434            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
435        DataTranslation<TimingSimpleCPU *> *trans2 =
436            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
437
438        thread->dtb->translateTiming(req1, tc, trans1, mode);
439        thread->dtb->translateTiming(req2, tc, trans2, mode);
440    } else {
441        WholeTranslationState *state =
442            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
443        DataTranslation<TimingSimpleCPU *> *translation
444            = new DataTranslation<TimingSimpleCPU *>(this, state);
445        thread->dtb->translateTiming(req, tc, translation, mode);
446    }
447
448    return NoFault;
449}
450
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        // Memory-mapped IPR writes bypass the cache: perform the write
        // now and schedule the "response" after the IPR access delay.
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        // Port busy: keep dcache_pkt and wait for recvRetry().
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    // True if the packet was issued, false if we are stalled on a retry.
    return dcache_pkt == NULL;
}
469
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Copy the store data into a heap buffer; ownership passes to the
    // packet once translation completes.
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    // Start of the last cache block touched; if it lies beyond addr the
    // access straddles a block boundary and must be split in two.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // Both halves share one WholeTranslationState; the access is
        // issued from finishTranslation() once both translations finish.
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
527
528
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        // Prefetch faults are suppressed; the access simply doesn't
        // happen.
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        // The access will never be issued, so reclaim the data buffer
        // and the request(s) before taking the fault.
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        // Translation succeeded: issue the access (split or unified).
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}
553
554
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Check for interrupts unless the current instruction must commit
    // atomically with its successor (delayed-commit microops).
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while executing out of the microcode
    // ROM or continuing the microops of a macro instruction.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        // Translation completes asynchronously via sendFetch() (through
        // the fetchTranslation callback object).
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // Executing from microcode: fake an icache completion with a
        // NULL packet and account for the elapsed cycles.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
589
590
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // Fetched bits land directly in the CPU's instruction buffer.
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        // The request never reaches the memory system, so we own it.
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    // Charge the cycles elapsed since the last accounting point.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}
621
622
623void
624TimingSimpleCPU::advanceInst(Fault fault)
625{
626    if (_status == Faulting)
627        return;
628
629    if (fault != NoFault) {
630        advancePC(fault);
631        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
632        reschedule(fetchEvent, clockEdge(), true);
633        _status = Faulting;
634        return;
635    }
636
637
638    if (!stayAtPC)
639        advancePC(fault);
640
641    if (tryCompleteDrain())
642            return;
643
644    if (_status == BaseSimpleCPU::Running) {
645        // kick off fetch of next instruction... callback from icache
646        // response will cause that instruction to be executed,
647        // keeping the CPU running.
648        fetch();
649    }
650}
651
652
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction.  pkt is NULL when executing from the microcode ROM
    // or continuing a macro-op (see fetch()).
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // No instruction decoded: just advance the PC state.
        advanceInst(NoFault);
    }

    if (pkt) {
        // Fetch packets/requests are owned by the CPU; reclaim them.
        delete pkt->req;
        delete pkt;
    }
}
722
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deliver the deferred icache response on the CPU clock edge.
    cpu->completeIfetch(pkt);
}
728
729bool
730TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
731{
732    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
733    // delay processing of returned data until next CPU clock edge
734    Tick next_tick = cpu->clockEdge();
735
736    if (next_tick == curTick())
737        cpu->completeIfetch(pkt);
738    else
739        tickEvent.schedule(pkt, next_tick);
740
741    return true;
742}
743
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        // Send succeeded this time; wait for the icache response.  If
        // it failed again we keep the packet and wait for another retry.
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}
757
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is one fragment of a split access: free the fragment and
        // only complete the instruction once both fragments are back.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            // Both fragments done: continue with the parent packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}
822
void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // A snoop may invalidate an LL/SC reservation held by this CPU.
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}
828
829
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            // Refuse the second response for now; the cache retries it
            // after retryEvent fires.
            return false;
        }
    }

    return true;
}
853
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deliver the deferred dcache response on the CPU clock edge.
    cpu->completeDataAccess(pkt);
}
859
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                // No fragment left pending: the whole split access is
                // now in flight; wait for the responses.
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
903
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-scheduling event: fires at tick t to deliver the delayed
    // IPR "response" back to the CPU.
    cpu->schedule(this, t);
}
910
void
TimingSimpleCPU::IprEvent::process()
{
    // Deliver the IPR access result as a normal data-access completion.
    cpu->completeDataAccess(pkt);
}
916
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable name used in event-queue debug output.
    return "Timing Simple CPU Delay IPR event";
}
922
923
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: print how address a routes from the data port.
    dcachePort.printAddr(a);
}
929
930
931////////////////////////////////////////////////////////////////////////
932//
933//  TimingSimpleCPU Simulation Object
934//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // This CPU model is single-threaded by construction.
    numThreads = 1;
    // In syscall-emulation mode exactly one workload process is required.
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
943