timing.cc revision 10665:aef704eaedd2
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}



TimingSimpleCPU::~TimingSimpleCPU()
{
}

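// Drain the CPU: report 0 (nothing to drain) when the CPU is idle or has
// reached an instruction boundary; otherwise remember the DrainManager,
// keep the fetch event scheduled so an in-flight microcode routine can
// finish, and report one outstanding drain request.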
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

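// Check whether a pending drain can now be completed.  Notifies the
// DrainManager and returns true once the CPU has reached a drained state.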
bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

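// Send a read packet towards the d-cache.  LLSC reads and mmapped IPR
// accesses are handled specially; if the port refuses the packet it is
// kept in dcache_pkt and the CPU enters DcacheRetry.  Returns true if the
// packet was handed off (dcache_pkt is no longer owned by the CPU).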
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

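// Issue a (non-split) data access once translation has finished: build the
// packet, then either complete it locally (NO_ACCESS requests or a failed
// store conditional) or send it out via handleReadPacket() /
// handleWritePacket().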
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt = buildPacket(req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // cleared to suppress the cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                                                  dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

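// Issue a data access that was split across a cache line boundary: the two
// fragment packets are sent (or queued for retry) individually, and their
// responses are recombined in completeDataAccess().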
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

PacketPtr
TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
{
    return read ? Packet::createRead(req) : Packet::createWrite(req);
}

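// Build the two fragment packets plus the overarching "big" packet for a
// split access.  The sender-state objects link each fragment back to the
// big packet so the responses can be matched up later.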
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        pkt1 = buildPacket(req, read);
        return;
    }

    pkt1 = buildPacket(req1, read);
    pkt2 = buildPacket(req2, read);

    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

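// Initiate a timing read: construct the request, split it if it crosses a
// cache line boundary, and start the (possibly split) data translation.
// The access itself is issued later from finishTranslation().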
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

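// Send the pending write packet in dcache_pkt towards the d-cache, handling
// mmapped IPR writes locally.  Returns true if the packet was handed off;
// false leaves the CPU in DcacheRetry waiting for the port.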
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

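// Initiate a timing write: copy the data (or zero it for cache block
// zeroing requests), construct and possibly split the request, and start
// the data translation.  The access itself is issued from
// finishTranslation().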
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


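// Callback from the DTB once (both halves of) a data translation have
// finished: either report the translation fault or issue the access via
// sendData() / sendSplitData().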
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


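// Start fetching the next instruction.  A normal fetch sets up the ifetch
// request and starts the ITB translation; microcode fetched from the ROM or
// an ongoing macro-op skips the i-cache entirely.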
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just been suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n",
                ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


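// Callback from the ITB: on a successful translation send the fetch packet
// to the i-cache (or wait for a retry); otherwise advance straight to the
// fault handler.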
void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n",
                req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}


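// Advance to the next instruction: on a fault, schedule the fetch of the
// fault handler; otherwise advance the PC and kick off the next fetch
// unless the CPU is faulting, draining, or no longer running.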
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }

    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


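// Callback for a completed instruction fetch (pkt may be NULL when the
// instruction comes from the microcode ROM or an ongoing macro-op).
// Memory references only initiate their access here; everything else is
// executed to completion.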
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now, the instruction will complete in a
        // dcache response callback, or the instruction faulted and has
        // started an ifetch.
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

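// Callback for a completed data access.  For split accesses the fragment
// responses are collected first; once the whole access is done the
// instruction's completeAcc() runs and the CPU advances.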
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
}

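// Responses from the d-cache are delayed until the next CPU clock edge.
// If the tick event is already in use (two fragments of a split access
// arriving in the same cycle), reject the second response and ask the
// cache to retry it.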
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than the CPU, we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

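// The d-cache is ready for the packet that previously failed to send.
// Resend it, taking care of the second fragment of a split access if the
// first one now succeeds.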
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}
