// timing.cc, revision 10774:68d688cbe26c
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/Mwait.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

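// Remember the packet this event should process and schedule the event
// on the owning CPU's event queue at tick t.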
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;
}

TimingSimpleCPU::~TimingSimpleCPU()
{
}

unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    updateCycleCounts();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(Cycles(0)));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}

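// Issue a read packet to the d-cache, or complete it locally if it is a
// memory-mapped IPR access.  Returns true if the memory system now owns
// the packet, false if the cache asked us to retry later.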
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;

    // We're about to issue a locked load, so tell the monitor
    // to start caring about this address
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

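// Complete a translated, non-split access: build the packet, handle
// LL/SC and conditional-swap bookkeeping for stores, and either hand the
// packet to the d-cache or complete it locally (NO_ACCESS requests and
// failed store-conditionals never reach the memory system).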
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt = buildPacket(req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req,
                                                  dcachePort.cacheBlockMask);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

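// Issue both fragments of a split (cache-line-crossing) access.  The
// second fragment is only sent once the first has been accepted; a
// fragment that is not accepted stays pending until the port retries.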
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(const Fault &fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    updateCycleCounts();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

PacketPtr
TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
{
    return read ? Packet::createRead(req) : Packet::createWrite(req);
}

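// Build the two fragment packets for a split access plus a main packet
// that carries the whole request.  Sender-state objects link each
// fragment back to the main packet so completeDataAccess() can tell when
// both halves have returned.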
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        pkt1 = buildPacket(req, read);
        return;
    }

    pkt1 = buildPacket(req1, read);
    pkt2 = buildPacket(req2, read);

    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

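// Initiate a timing-mode read: build the request, split it if it crosses
// a cache-line boundary, and start the (possibly split) data TLB
// translation.  The actual cache access is issued from
// finishTranslation() and completes in completeDataAccess().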
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

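// Issue the pending write packet in dcache_pkt, or complete it locally
// if it is a memory-mapped IPR access.  Returns true if the memory
// system now owns the packet, false if the cache asked us to retry.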
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

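// Initiate a timing-mode write: copy (or zero) the store data, build the
// request, split it if it crosses a cache-line boundary, and start the
// data TLB translation.  As with reads, the cache access itself is
// issued from finishTranslation().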
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        memset(newData, 0, size);
    } else {
        memcpy(newData, data, size);
    }

    if (traceData)
        traceData->setMem(addr, size, flags);

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


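// Callback from the data TLB once translation of one or both halves of
// an access has finished.  Faulting translations are funnelled into
// translationFault(); successful ones are sent to the d-cache via
// sendData() or sendSplitData().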
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


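// Start fetching the next instruction.  A new macro-op triggers an ITB
// translation followed by an i-cache access; microcode from the ROM or
// an in-flight macro-op is executed without touching the i-cache.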
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
        checkForInterrupts();
        checkPcEventQueue();
    }

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n",
                ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        updateCycleCounts();
    }
}


void
TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
                           ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n",
                req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    updateCycleCounts();
}

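// Move past the instruction that just completed.  A fault redirects the
// PC to the handler and reschedules the fetch event; otherwise the PC is
// advanced (unless stayAtPC is set) and, unless a pending drain completes
// here, the next fetch is started if the CPU is still Running.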
void
TimingSimpleCPU::advanceInst(const Fault &fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }

    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    updateCycleCounts();

    if (pkt)
        pkt->req->setAccessLatency();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
    // we should only ever see one response per cycle since we only
    // issue a new request once this response is sunk
    assert(!tickEvent.scheduled());
    // delay processing of returned data until next CPU clock edge
    tickEvent.schedule(pkt, cpu->clockEdge());

    return true;
}

void
TimingSimpleCPU::IcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();

    updateCycleCounts();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

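// Charge all cycles elapsed since the previous update to the CPU's cycle
// count and the corresponding probe point.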
void
TimingSimpleCPU::updateCycleCounts()
{
    const Cycles delta(curCycle() - previousCycle);

    numCycles += delta;
    ppCycles->notify(delta);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
}

void
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // X86 ISA: Snooping an invalidation for monitor/mwait
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }
}

bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());

    // The timing CPU is not really ticked, instead it relies on the
    // memory system (fetch and load/store) to set the pace.
    if (!tickEvent.scheduled()) {
        // Delay processing of returned data until next CPU clock edge
        tickEvent.schedule(pkt, cpu->clockEdge());
        return true;
    } else {
        // In the case of a split transaction and a cache that is
        // faster than a CPU we could get two responses in the
        // same tick, delay the second one
        if (!retryRespEvent.scheduled())
            cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
        return false;
    }
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvReqRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

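// Memory-mapped IPR accesses bypass the memory system; this event delays
// their completion by the latency reported by the ISA's IPR handlers.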
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}