timing.cc revision 6658:f4de76601762
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

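// Return the requested port (instruction or data cache side) by name.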
Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


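// Remember the packet and schedule this event at time t on the owning CPU.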
void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
    dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
           deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

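// Issue a timing-mode read to the d-cache (or handle it as a memory-mapped
// IPR access). Returns true once the packet is no longer owned by the CPU,
// false if it must be retried later.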
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

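// Called when the data translation completes: build the packet and either
// send it to the d-cache or, for faults and no-access requests, finish the
// access immediately.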
void
TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
        uint8_t *data, uint64_t *res, bool read)
{
    _status = Running;
    if (fault != NoFault) {
        delete data;
        delete req;

        translationFault(fault);
        return;
    }
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

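// Called when both halves of a split (cache-line-crossing) access have been
// translated: build the fragment packets and send each one to the d-cache.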
void
TimingSimpleCPU::sendSplitData(Fault fault1, Fault fault2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    _status = Running;
    if (fault1 != NoFault || fault2 != NoFault) {
        delete data;
        delete req1;
        delete req2;
        if (fault1 != NoFault)
            translationFault(fault1);
        else if (fault2 != NoFault)
            translationFault(fault2);
        return;
    }
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        if (handleReadPacket(pkt1)) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            if (handleReadPacket(pkt2)) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        if (handleWritePacket()) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            if (handleWritePacket()) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    }
}

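// Handle a fault reported by the TLB during address translation: charge the
// elapsed cycles, drop any trace record for the faulting instruction, and
// let advancePC()/advanceInst() invoke the fault handler.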
void
TimingSimpleCPU::translationFault(Fault fault)
{
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

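// Choose the memory command for a request (plain read/write, load-locked,
// store-conditional, or swap) and allocate the packet.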
void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

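// Build the two fragment packets of a split access plus a parent packet that
// owns the data buffer; the sender-state objects link the fragments to the
// parent so the access completes only when both responses have returned.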
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

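// Initiate a timing-mode load: allocate the request, split it if it crosses
// a cache block boundary, and start the (possibly split) data translation.
// The access itself continues in sendData()/sendSplitData().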
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req  = new Request(asid, addr, data_size,
                                  flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);


    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                                           (uint8_t *)(new T), BaseTLB::Read);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Read);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Read);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)(new T), NULL, BaseTLB::Read);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Read);
    }

    if (traceData) {
        traceData->setData(data);
        traceData->setAddr(addr);
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return NoFault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

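// Issue the pending write packet (dcache_pkt) to the d-cache, or handle it
// as a memory-mapped IPR access. Returns true once the packet is no longer
// owned by the CPU.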
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

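// Initiate a timing-mode store: copy the data into a guest-endian buffer,
// split the request if it crosses a cache block boundary, and start the data
// translation. The access itself continues in sendData()/sendSplitData().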
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    T *dataP = new T;
    *dataP = TheISA::htog(data);
    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                (uint8_t *)dataP, BaseTLB::Write);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Write);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Write);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)dataP, res, BaseTLB::Write);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Write);
    }

    if (traceData) {
        traceData->setAddr(req->getVaddr());
        traceData->setData(data);
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return NoFault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


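// Start fetching the next instruction: check for interrupts and PC events,
// then either start an itb translation for a normal fetch or, for microcode
// from the ROM or a pending macro-op, skip the i-cache and execute directly.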
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom && !curMacroStaticInst) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick - previousTick);
        previousTick = curTick;
    }
}


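// Called when the fetch translation completes: send the instruction read to
// the i-cache, or go straight to the fault handler if translation failed.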
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


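// Move on to the next instruction: update the PC (unless the current
// instruction asked to stay at the same PC) and, if the CPU is still
// Running, kick off the next fetch.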
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
            curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse ||
                    _status == DcacheRetry || _status == DTBWaitResponse);
            assert(fault == NoFault);
        } else {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

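// Handle a packet arriving on the i-cache port: responses are handed to
// completeIfetch() on the next CPU clock edge, nacked packets are resent,
// and snooped coherence requests are ignored.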
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

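// Handle a packet arriving on the d-cache port: responses are handed to
// completeDataAccess() on the next CPU clock edge, nacked packets are
// resent, and snooped coherence requests are ignored.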
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

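// The d-cache asked us to retry a previously refused packet; resend it,
// taking care of the second fragment of a split access if there is one.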
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
