/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

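// Map a port name from the configuration to one of this CPU's two
// cache-side ports; the idx argument is unused since both ports are
// singletons.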
Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

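// Post-construction initialization: record the CPU id from the thread
// context and, in full-system mode, let the ISA initialize each thread
// context (including its PC).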
void
TimingSimpleCPU::init()
{
    BaseCPU::init();
    cpuId = tc->readCpuId();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock),
      dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

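// Drain the CPU for checkpointing or CPU switching.  Returns 0 if the CPU
// can drain immediately, or 1 if it must first wait for an outstanding
// memory access; in that case drain_event is signalled later from
// completeDrain().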
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

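// Initiate a timing-mode load: build and translate the request, then either
// hand the packet to the d-cache port, stash it for a retry if the port is
// busy, or (for memory-mapped IPRs) schedule an IprEvent.  The access
// finishes later in completeDataAccess().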
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        PacketPtr pkt =
            new Packet(req,
                       (req->isLocked() ?
                        MemCmd::LoadLockedReq : MemCmd::ReadReq),
                       Packet::Broadcast);
        pkt->dataDynamic<T>(new T);

        if (req->isMmapedIpr()) {
            Tick delay;
            delay = TheISA::handleIprRead(thread->getTC(), pkt);
            new IprEvent(pkt, this, nextCycle(curTick + delay));
            _status = DcacheWaitResponse;
            dcache_pkt = NULL;
        } else if (!dcachePort.sendTiming(pkt)) {
            _status = DcacheRetry;
            dcache_pkt = pkt;
        } else {
            _status = DcacheWaitResponse;
            // memory system takes ownership of packet
            dcache_pkt = NULL;
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");
    } else {
        delete req;
    }

    if (traceData) {
        traceData->setData(data);
    }
    return fault;
}

Fault
TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataReadReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

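// Initiate a timing-mode store, including store-conditional and swap
// variants.  dcache_pkt is always allocated (completeAcc() needs it even if
// a store-conditional fails locally), but it is only sent to memory when
// do_access remains true.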
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        MemCmd cmd = MemCmd::WriteReq; // default
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
            if (req->isCondSwap()) {
                assert(res);
                req->setExtraData(*res);
            }
        }

        // Note: need to allocate dcache_pkt even if do_access is
        // false, as it's used unconditionally to call completeAcc().
        assert(dcache_pkt == NULL);
        dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
        dcache_pkt->allocate();
        dcache_pkt->set(data);

        if (do_access) {
            if (req->isMmapedIpr()) {
                Tick delay;
                dcache_pkt->set(htog(data));
                delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
                new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
                _status = DcacheWaitResponse;
                dcache_pkt = NULL;
            } else if (!dcachePort.sendTiming(dcache_pkt)) {
                _status = DcacheRetry;
            } else {
                _status = DcacheWaitResponse;
                // memory system takes ownership of packet
                dcache_pkt = NULL;
            }
        }
        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");
    } else {
        delete req;
    }

    if (traceData) {
        traceData->setData(data);
    }

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}

Fault
TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataWriteReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

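// Begin fetching the next instruction: check for interrupts and PC events,
// then either send a timing read to the i-cache or, for microcode coming
// from the ROM, complete the "fetch" immediately with a NULL packet.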
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(cpuId, /* thread ID */ 0);
        Fault fault = setupFetchRequest(ifetch_req);

        ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (fault == NoFault) {
            if (!icachePort.sendTiming(ifetch_pkt)) {
                // Need to wait for retry
                _status = IcacheRetry;
            } else {
                // Need to wait for cache to respond
                _status = IcacheWaitResponse;
                // ownership of packet transferred to memory system
                ifetch_pkt = NULL;
            }
        } else {
            delete ifetch_req;
            delete ifetch_pkt;
            // fetch fault: advance directly to next instruction (fault handler)
            advanceInst(fault);
        }
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}

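// Callback for a completed instruction fetch (pkt is NULL when the
// instruction came from the microcode ROM).  Memory-reference instructions
// initiate a d-cache access and finish in completeDataAccess(); everything
// else executes here, after which advanceInst() kicks off the next fetch.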
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL) {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

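// Timing response (or nack) arriving on the i-cache port.  Real responses
// are deferred to the next CPU clock edge; nacked packets are resent
// immediately or queued for a retry.  Snoop requests are ignored.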
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

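// The i-cache can now accept the fetch packet it previously refused;
// resend it and go back to waiting for the response.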
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

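// Callback for a completed d-cache access: finish the load or store,
// update LLSC state for load-locked requests, and either advance to the
// next instruction or finish draining.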
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

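// Timing response (or nack) arriving on the d-cache port; mirrors the
// i-cache handling above.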
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

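// The d-cache can now accept the stalled access; resend dcache_pkt.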
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

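// IprEvent models the latency of a memory-mapped IPR access; when it fires,
// the access is completed as if it were a normal d-cache response.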
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
