timing.cc revision 10596
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

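// Drain the CPU: returns 0 if the CPU is already drained (idle, or
// running with no outstanding accesses), and 1 if draining has to
// complete asynchronously. In the latter case the DrainManager is
// signalled later from tryCompleteDrain().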
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

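// Try to send a read packet to the dcache (or handle it as a
// memory-mapped IPR access). Returns true once the packet is no longer
// owned by the CPU, false if we must wait for a retry from the port.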
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

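// Issue a (non-split) data access once translation has completed:
// build the packet, then either complete it immediately (NO_ACCESS
// requests), send it as a read, or send it as a write/store-conditional
// after consulting the locked-memory state.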
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                    dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

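// Issue a data access that crosses a cache-line boundary as two
// fragment packets. The second fragment is only sent once the first
// one has been accepted by the dcache.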
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    pkt = read ? Packet::createRead(req) : Packet::createWrite(req);
}

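// Build the two fragment packets for a split access plus a "main"
// packet that carries the data buffer; sender-state objects link the
// fragments back to the main packet so the response can be reassembled.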
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(),
                 dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

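// Initiate a timing read: start the (possibly split) DTLB translation
// and return immediately. The access itself is issued from
// finishTranslation(), and translation faults are reported there, so
// this function always returns NoFault.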
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

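// Counterpart of handleReadPacket() for the write held in dcache_pkt:
// returns true once the packet has been handed to the memory system
// (or handled as a memory-mapped IPR access).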
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

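// Initiate a timing write: copy (or zero) the data into a new buffer,
// then start the (possibly split) DTLB translation. As with readMem(),
// the access is issued from finishTranslation() and faults are
// reported there.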
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}

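// Callback from the data TLB once translation of a request (or of both
// halves of a split request) has finished: either report the fault or
// issue the access.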
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}

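// Start fetching the next instruction: check for interrupts and PC
// events, then either translate the fetch address through the ITLB
// (for a normal fetch) or complete immediately when executing out of
// the microcode ROM or in the middle of a macro-op.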
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n",
                ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n",
                req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}

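// Advance to the next instruction (or to the fault handler) and, if
// the CPU is still running and not completing a drain, kick off the
// next fetch.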
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }

    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}

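// Called when the icache response arrives (or directly, with a NULL
// packet, when no memory fetch was needed): decode and execute the
// instruction, initiating a dcache access for memory references.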
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch.
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

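// Called when a dcache response arrives: for split accesses, wait
// until both fragments have returned, then complete the memory
// instruction and advance.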
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
}

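// Delay handling of a dcache response until the next CPU clock edge.
// If a second response (e.g. the other half of a split access) arrives
// before the first one has been consumed, reject it and schedule
// retryEvent so it is resent later.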
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
953