timing.cc revision 10533
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

#include "debug/Mwait.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}



TimingSimpleCPU::~TimingSimpleCPU()
{
}

unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

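// Issue a timing-mode read to the D-cache, or to a memory-mapped IPR
// register (which bypasses the cache entirely). Returns true once the
// packet has been handed off or the IPR event scheduled; returns false
// when the cache asked for a retry, in which case the packet is parked
// in dcache_pkt until DcachePort::recvRetry() resends it.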
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

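// Issue a single (non-split) access once its translation has finished:
// build the packet, attach the translated data buffer, and either
// complete it locally (NO_ACCESS requests), hand it to handleReadPacket(),
// or run the LL/SC and conditional-swap checks before handing it to
// handleWritePacket(). A failed store conditional never reaches the cache
// and is completed locally instead.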
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        // cleared to suppress the cache access, e.g. for a failed
        // store conditional
        bool do_access = true;

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                                                  dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    pkt = read ? Packet::createRead(req) : Packet::createWrite(req);
}

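// Build the two fragment packets for an access that straddles a cache
// line boundary, plus a "big" packet standing for the whole access. Each
// fragment carries a SplitFragmentSenderState pointing back at the big
// packet, and the big packet's SplitMainSenderState counts the fragments
// still outstanding so completeDataAccess() knows when both halves have
// returned.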
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(),
                 dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

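    // split_addr is the start of the last cache line the access touches;
    // the access needs to be split iff that line begins after addr.
    // Illustrative example (numbers assumed, not from the source): with
    // 64-byte lines, addr = 0x7c and size = 8 give
    // roundDown(0x83, 64) = 0x80 > 0x7c, so the request is split at 0x80.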
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


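// Callback invoked by the DataTranslation objects once the DTB has
// finished translating (both halves, for a split access). On a fault the
// temporary data buffer and the requests are freed and the fault path is
// taken via translationFault(); otherwise the access itself is issued
// through sendData() or sendSplitData().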
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


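// Start fetching the next instruction: check for interrupts and PC
// events, then either kick off a timing ITB translation (sendFetch()
// continues once it completes) or, when executing microcode that needs
// no new fetch, fall straight through to completeIfetch(NULL).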
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}


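// Move on to the next (micro-)instruction. A fault redirects the PC to
// the handler and reschedules the fetch event; otherwise the PC is
// advanced (unless a microcode sequence holds it via stayAtPC), a pending
// drain gets a chance to complete, and a new fetch is started if the CPU
// is still Running.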
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();


    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}
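// Charge all cycles elapsed since the previous call to the numCycles
// stat and the ppCycles probe, then remember the current cycle. The
// timing CPU is event-driven rather than ticked, so this runs at each
// discrete step (fetch, instruction completion, data access).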
void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
}

bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
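// Memory-mapped IPR accesses never reach the cache: handleIprRead() /
// handleIprWrite() report a latency, and this event simply hands the
// completed packet back to completeDataAccess() that many cycles later.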
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}