timing.cc revision 6739:48d10ba361c9
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
    dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
           deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

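// Issue a timing-mode read to the dcache, or dispatch it as a
// memory-mapped IPR access.  Returns true if the packet was accepted
// (or handled as IPR), false if the port refused it and we must retry.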
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

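// Callback for a completed (non-split) data translation: on a fault,
// clean up and report it; otherwise build the packet and either send it
// to the dcache or complete it locally (NO_ACCESS and suppressed
// store-conditional cases).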
void
TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
        uint8_t *data, uint64_t *res, bool read)
{
    _status = Running;
    if (fault != NoFault) {
        if (req->isPrefetch())
            fault = NoFault;
        delete data;
        delete req;

        translationFault(fault);
        return;
    }
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

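// Callback for a completed split (cache-line-crossing) data translation:
// on any fault, clean up and report it; otherwise build both fragment
// packets and try to send them in order, clearing each fragment from the
// parent request as it is accepted.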
void
TimingSimpleCPU::sendSplitData(Fault fault1, Fault fault2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    _status = Running;
    if (fault1 != NoFault || fault2 != NoFault) {
        if (req1->isPrefetch())
            fault1 = NoFault;
        if (req2->isPrefetch())
            fault2 = NoFault;
        delete data;
        delete req1;
        delete req2;
        if (fault1 != NoFault)
            translationFault(fault1);
        else if (fault2 != NoFault)
            translationFault(fault2);
        return;
    }
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        if (handleReadPacket(pkt1)) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            if (handleReadPacket(pkt2)) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        if (handleWritePacket()) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            if (handleWritePacket()) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    }
}

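// Handle a fault reported by the data translation: account for the
// elapsed cycles, drop any trace record, and advance past the faulting
// instruction (completing a drain if one is in progress).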
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

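// Build the two fragment packets for a split access plus the overarching
// "big" packet that carries the data buffer; the sender-state objects
// link the fragments back to their parent so responses can be matched up.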
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

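// Initiate a timing read of type T at the given virtual address.  The
// access is split into two requests if it crosses a cache block
// boundary, and the actual memory access is started once the (possibly
// split) translation completes.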
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req  = new Request(asid, addr, data_size,
                                  flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);


    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                                           (uint8_t *)(new T), BaseTLB::Read);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Read);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Read);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)(new T), NULL, BaseTLB::Read);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Read);
    }

    if (traceData) {
        traceData->setData(data);
        traceData->setAddr(addr);
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return NoFault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

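// Issue the pending write in dcache_pkt to the dcache, or dispatch it as
// a memory-mapped IPR access.  Returns true if the packet was accepted
// (or handled as IPR), false if the port refused it and we must retry.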
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

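// Initiate a timing write of type T to the given virtual address.  The
// data is converted to guest endianness up front; as with reads, the
// access is split on a cache block boundary if necessary and the memory
// access is started once translation completes.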
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    T *dataP = new T;
    *dataP = TheISA::htog(data);
    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                (uint8_t *)dataP, BaseTLB::Write);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Write);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Write);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)dataP, res, BaseTLB::Write);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Write);
    }

    if (traceData) {
        traceData->setAddr(req->getVaddr());
        traceData->setData(data);
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return NoFault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


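// Start fetching the next instruction: check for interrupts and PC
// events, then either translate a new fetch address or, for microcode
// from the ROM or a pending macroop, skip straight to completeIfetch().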
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom && !curMacroStaticInst) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick - previousTick);
        previousTick = curTick;
    }
}


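// Callback for a completed fetch translation: send the instruction read
// to the icache, or advance directly to the fault handler if the
// translation faulted.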
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


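// Move on to the next instruction: update the PC (unless the current
// instruction wants to stay at the same PC) and, if the CPU is still
// running, kick off the next fetch.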
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


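// Called when the instruction fetch completes (pkt may be NULL for
// fetches satisfied from the microcode ROM or a pending macroop):
// decode and either initiate a memory access or execute the instruction
// outright, then advance.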
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
            curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse ||
                    _status == DcacheRetry || _status == DTBWaitResponse);
            assert(fault == NoFault);
        } else {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

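// Called when the dcache response (or IPR event) for a load or store
// arrives.  For split accesses, wait until both fragments have returned
// and then complete against the reassembled "big" packet.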
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

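// The dcache is ready for the packet it previously refused: resend it,
// and for split accesses also try to send the other pending fragment.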
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
