/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock),
      dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

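// Drain support: the CPU can drain immediately only when it is not
// waiting on an outstanding icache/dcache access; otherwise it records
// the drain event and completes the drain once the pending access
// finishes (see completeDrain()).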
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

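// Try to send a read request packet to the dcache. Memory-mapped IPR
// reads are handled locally through an IprEvent instead of going to
// memory. Returns true once the memory system (or the IPR path) has
// taken ownership of the packet; returns false if the dcache asked us
// to retry, in which case the packet is stashed in dcache_pkt.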
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

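// Build the two fragment packets (plus a parent packet that owns the
// data buffer) for an access that crosses a cache block boundary at
// split_addr. The fragments share the parent's data buffer and link
// back to it through SplitFragmentSenderState, so the parent can be
// completed once both fragments have returned.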
Fault
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr &req, Addr split_addr, uint8_t *data, bool read)
{
    Fault fault;
    RequestPtr req1, req2;
    assert(!req->isLocked() && !req->isSwap());
    req->splitOnVaddr(split_addr, req1, req2);

    pkt1 = pkt2 = NULL;
    if ((fault = buildPacket(pkt1, req1, read)) != NoFault ||
            (fault = buildPacket(pkt2, req2, read)) != NoFault) {
        delete req;
        delete pkt1;
        req = NULL;
        pkt1 = NULL;
        return fault;
    }

    assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
    return fault;
}

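// Translate the request and, on success, allocate a packet with the
// memory command appropriate for the access type (plain read/write,
// load-locked, store-conditional, or swap).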
Fault
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr &req, bool read)
{
    Fault fault = read ? thread->translateDataReadReq(req) :
                         thread->translateDataWriteReq(req);
    MemCmd cmd;
    if (fault != NoFault) {
        delete req;
        req = NULL;
        pkt = NULL;
        return fault;
    } else if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLocked())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
    return NoFault;
}

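// Initiate a timing-mode load of a T-sized value. Accesses that cross
// a cache block boundary are issued as a split transaction; the data
// itself is delivered later through completeDataAccess(), so the data
// argument is not filled in here.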
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const int thread_id = 0;
    const Addr pc = thread->readPC();
    int block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    PacketPtr pkt;
    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, thread_id);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    if (split_addr > addr) {
        PacketPtr pkt1, pkt2;
        fault = this->buildSplitPacket(pkt1, pkt2, req,
                split_addr, (uint8_t *)(new T), true);
        if (fault != NoFault)
            return fault;
        if (handleReadPacket(pkt1)) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            if (handleReadPacket(pkt2)) {
                send_state =
                    dynamic_cast<SplitFragmentSenderState *>(pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    } else {
        Fault fault = buildPacket(pkt, req, true);
        if (fault != NoFault) {
            return fault;
        }
        pkt->dataDynamic<T>(new T);

        handleReadPacket(pkt);
    }

    if (traceData) {
        traceData->setData(data);
        traceData->setAddr(addr);
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return NoFault;
}

Fault
TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataReadReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

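// Try to send the pending write packet (dcache_pkt) to the dcache.
// Memory-mapped IPR writes are handled locally through an IprEvent.
// Returns true once the packet has been handed off; returns false if
// the dcache asked us to retry, leaving dcache_pkt set for recvRetry().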
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

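// Initiate a timing-mode store of a T-sized value. Block-crossing
// accesses are issued as a split transaction; load-locked/store-
// conditional and conditional-swap bookkeeping is handled here before
// the packet is sent to the dcache.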
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    const int asid = 0;
    const int thread_id = 0;
    const Addr pc = thread->readPC();
    int block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, thread_id);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    if (split_addr > addr) {
        PacketPtr pkt1, pkt2;
        T *dataP = new T;
        *dataP = data;
        Fault fault = this->buildSplitPacket(pkt1, pkt2, req, split_addr,
                (uint8_t *)dataP, false);
        if (fault != NoFault)
            return fault;
        dcache_pkt = pkt1;
        if (handleWritePacket()) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            if (handleWritePacket()) {
                send_state =
                    dynamic_cast<SplitFragmentSenderState *>(pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    } else {
        bool do_access = true;  // flag to suppress cache access

        Fault fault = buildPacket(dcache_pkt, req, false);
        if (fault != NoFault)
            return fault;

        if (req->isLocked()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        dcache_pkt->allocate();
        if (req->isMmapedIpr())
            dcache_pkt->set(htog(data));
        else
            dcache_pkt->set(data);

        if (do_access)
            handleWritePacket();
    }

    if (traceData) {
        traceData->setAddr(req->getVaddr());
        traceData->setData(data);
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return NoFault;
}

Fault
TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataWriteReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


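// Start fetching the next instruction. Interrupts and PC events are
// checked first; microcoded instructions coming from the microcode ROM
// skip the icache and go straight to completeIfetch().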
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        Fault fault = setupFetchRequest(ifetch_req);

        ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (fault == NoFault) {
            if (!icachePort.sendTiming(ifetch_pkt)) {
                // Need to wait for retry
                _status = IcacheRetry;
            } else {
                // Need to wait for cache to respond
                _status = IcacheWaitResponse;
                // ownership of packet transferred to memory system
                ifetch_pkt = NULL;
            }
        } else {
            delete ifetch_req;
            delete ifetch_pkt;
            // fetch fault: advance directly to next instruction (fault handler)
            advanceInst(fault);
        }
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


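// Advance the PC past the instruction that just completed (unless we
// are staying at the current PC) and, if the CPU is still Running,
// kick off the fetch of the next instruction.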
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


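// Called when the icache response arrives (or directly from fetch()
// with a NULL packet for microcode-ROM instructions). Decodes and
// executes the instruction: memory references only initiate their
// access here and finish in completeDataAccess(); everything else
// executes to completion and the PC is advanced.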
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
            curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL) {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

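// Icache response path: genuine responses are processed on the next
// CPU clock edge (via the port's tick event); nacked packets are
// re-sent immediately, falling back to the retry path if the resend
// fails. Snoop requests are ignored.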
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

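// Called when a dcache response arrives. For split accesses the two
// fragment responses are folded back into the parent packet first;
// only once both fragments are in does the instruction's completeAcc()
// run and the CPU move on to the next instruction.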
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

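// The dcache asked us to retry: re-send the stashed packet. If it is a
// fragment of a split access, also try to send the other outstanding
// fragment before handing ownership back to the memory system.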
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

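// Event used to model the latency of a memory-mapped IPR access, which
// is handled inside the CPU rather than by the memory system; when it
// fires, the access completes just like a normal dcache response.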
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}