timing.cc (7046:d21d575a6f99 vs. 7516:cfbbc9178e7a)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "config/the_isa.hh"
36#include "cpu/exetrace.hh"
37#include "cpu/simple/timing.hh"
38#include "mem/packet.hh"
39#include "mem/packet_access.hh"
40#include "params/TimingSimpleCPU.hh"
41#include "sim/system.hh"
42
43using namespace std;
44using namespace TheISA;
45
46Port *
47TimingSimpleCPU::getPort(const std::string &if_name, int idx)
48{
49 if (if_name == "dcache_port")
50 return &dcachePort;
51 else if (if_name == "icache_port")
52 return &icachePort;
53 else
54 panic("No Such Port\n");
55}
56
57void
58TimingSimpleCPU::init()
59{
60 BaseCPU::init();
61#if FULL_SYSTEM
62 for (int i = 0; i < threadContexts.size(); ++i) {
63 ThreadContext *tc = threadContexts[i];
64
65 // initialize CPU, including PC
66 TheISA::initCPU(tc, _cpuId);
67 }
68#endif
69}
70
71Tick
72TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
73{
74 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
75 return curTick;
76}
77
78void
79TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
80{
81 //No internal storage to update, just return
82 return;
83}
84
85void
86TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
87{
88 if (status == RangeChange) {
89 if (!snoopRangeSent) {
90 snoopRangeSent = true;
91 sendStatusChange(Port::RangeChange);
92 }
93 return;
94 }
95
96 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
97}
98
99
100void
101TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
102{
103 pkt = _pkt;
104 cpu->schedule(this, t);
105}
106
107TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
108 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
109 dcachePort(this, p->clock), fetchEvent(this)
110{
111 _status = Idle;
112
113 icachePort.snoopRangeSent = false;
114 dcachePort.snoopRangeSent = false;
115
116 ifetch_pkt = dcache_pkt = NULL;
117 drainEvent = NULL;
118 previousTick = 0;
119 changeState(SimObject::Running);
120}
121
122
123TimingSimpleCPU::~TimingSimpleCPU()
124{
125}
126
127void
128TimingSimpleCPU::serialize(ostream &os)
129{
130 SimObject::State so_state = SimObject::getState();
131 SERIALIZE_ENUM(so_state);
132 BaseSimpleCPU::serialize(os);
133}
134
135void
136TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
137{
138 SimObject::State so_state;
139 UNSERIALIZE_ENUM(so_state);
140 BaseSimpleCPU::unserialize(cp, section);
141}
142
143unsigned int
144TimingSimpleCPU::drain(Event *drain_event)
145{
146 // TimingSimpleCPU is ready to drain if it's not waiting for
147 // an access to complete.
148 if (_status == Idle || _status == Running || _status == SwitchedOut) {
149 changeState(SimObject::Drained);
150 return 0;
151 } else {
152 changeState(SimObject::Draining);
153 drainEvent = drain_event;
154 return 1;
155 }
156}
157
158void
159TimingSimpleCPU::resume()
160{
161 DPRINTF(SimpleCPU, "Resume\n");
162 if (_status != SwitchedOut && _status != Idle) {
163 assert(system->getMemoryMode() == Enums::timing);
164
165 if (fetchEvent.scheduled())
166 deschedule(fetchEvent);
167
168 schedule(fetchEvent, nextCycle());
169 }
170
171 changeState(SimObject::Running);
172}
173
174void
175TimingSimpleCPU::switchOut()
176{
177 assert(_status == Running || _status == Idle);
178 _status = SwitchedOut;
179 numCycles += tickToCycles(curTick - previousTick);
180
181 // If we've been scheduled to resume but are then told to switch out,
182 // we'll need to cancel it.
183 if (fetchEvent.scheduled())
184 deschedule(fetchEvent);
185}
186
187
188void
189TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
190{
191 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
192
193 // if any of this CPU's ThreadContexts are active, mark the CPU as
194 // running and schedule its tick event.
195 for (int i = 0; i < threadContexts.size(); ++i) {
196 ThreadContext *tc = threadContexts[i];
197 if (tc->status() == ThreadContext::Active && _status != Running) {
198 _status = Running;
199 break;
200 }
201 }
202
203 if (_status != Running) {
204 _status = Idle;
205 }
206 assert(threadContexts.size() == 1);
207 previousTick = curTick;
208}
209
210
211void
212TimingSimpleCPU::activateContext(int thread_num, int delay)
213{
214 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
215
216 assert(thread_num == 0);
217 assert(thread);
218
219 assert(_status == Idle);
220
221 notIdleFraction++;
222 _status = Running;
223
224 // kick things off by initiating the fetch of the next instruction
225 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
226}
227
228
229void
230TimingSimpleCPU::suspendContext(int thread_num)
231{
232 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
233
234 assert(thread_num == 0);
235 assert(thread);
236
237 if (_status == Idle)
238 return;
239
240 assert(_status == Running);
241
242 // just change status to Idle... if status != Running,
243 // completeInst() will not initiate fetch of next instruction.
244
245 notIdleFraction--;
246 _status = Idle;
247}
248
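// Issue a timing read to the dcache.  Memory-mapped IPR accesses are
// handled locally and completed via an IprEvent; otherwise the packet is
// sent on dcachePort, and a refused send leaves it in dcache_pkt so it can
// be resent from recvRetry().  Returns true once the packet has been
// handed off (dcache_pkt == NULL).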
249bool
250TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
251{
252 RequestPtr req = pkt->req;
253 if (req->isMmapedIpr()) {
254 Tick delay;
255 delay = TheISA::handleIprRead(thread->getTC(), pkt);
256 new IprEvent(pkt, this, nextCycle(curTick + delay));
257 _status = DcacheWaitResponse;
258 dcache_pkt = NULL;
259 } else if (!dcachePort.sendTiming(pkt)) {
260 _status = DcacheRetry;
261 dcache_pkt = pkt;
262 } else {
263 _status = DcacheWaitResponse;
264 // memory system takes ownership of packet
265 dcache_pkt = NULL;
266 }
267 return dcache_pkt == NULL;
268}
269
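// Called once translation has finished for a non-split access: build the
// packet and either complete it immediately (NO_ACCESS), issue the read,
// or, for writes, apply the LL/SC and conditional-swap bookkeeping before
// sending the store to the dcache.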
270void
271TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
272 bool read)
273{
274 PacketPtr pkt;
275 buildPacket(pkt, req, read);
276 pkt->dataDynamic<uint8_t>(data);
277 if (req->getFlags().isSet(Request::NO_ACCESS)) {
278 assert(!dcache_pkt);
279 pkt->makeResponse();
280 completeDataAccess(pkt);
281 } else if (read) {
282 handleReadPacket(pkt);
283 } else {
284 bool do_access = true; // if cleared, the cache access is suppressed
285
286 if (req->isLLSC()) {
287 do_access = TheISA::handleLockedWrite(thread, req);
288 } else if (req->isCondSwap()) {
289 assert(res);
290 req->setExtraData(*res);
291 }
292
293 if (do_access) {
294 dcache_pkt = pkt;
295 handleWritePacket();
296 } else {
297 _status = DcacheWaitResponse;
298 completeDataAccess(pkt);
299 }
300 }
301}
302
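// Issue both fragments of an access that crosses a cache-block boundary.
// Each fragment that is accepted is cleared from the parent packet's
// bookkeeping so that only still-pending fragments are resent on a retry.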
303void
304TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
305 RequestPtr req, uint8_t *data, bool read)
306{
307 PacketPtr pkt1, pkt2;
308 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
309 if (req->getFlags().isSet(Request::NO_ACCESS)) {
310 assert(!dcache_pkt);
311 pkt1->makeResponse();
312 completeDataAccess(pkt1);
313 } else if (read) {
314 if (handleReadPacket(pkt1)) {
315 SplitFragmentSenderState * send_state =
316 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
317 send_state->clearFromParent();
318 if (handleReadPacket(pkt2)) {
319 send_state = dynamic_cast<SplitFragmentSenderState *>(
320 pkt2->senderState);
321 send_state->clearFromParent();
322 }
323 }
324 } else {
325 dcache_pkt = pkt1;
326 if (handleWritePacket()) {
327 SplitFragmentSenderState * send_state =
328 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
329 send_state->clearFromParent();
330 dcache_pkt = pkt2;
331 if (handleWritePacket()) {
332 send_state = dynamic_cast<SplitFragmentSenderState *>(
333 pkt2->senderState);
334 send_state->clearFromParent();
335 }
336 }
337 }
338}
339
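// Handle a fault reported by the DTB: account for the elapsed cycles, drop
// any trace record for the faulting instruction, and either finish a
// pending drain or advance to the fault handler.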
340void
341TimingSimpleCPU::translationFault(Fault fault)
342{
343 // fault may be NoFault in cases where a fault is suppressed,
344 // for instance prefetches.
345 numCycles += tickToCycles(curTick - previousTick);
346 previousTick = curTick;
347
348 if (traceData) {
349 // Since there was a fault, we shouldn't trace this instruction.
350 delete traceData;
351 traceData = NULL;
352 }
353
354 postExecute();
355
356 if (getState() == SimObject::Draining) {
357 advancePC(fault);
358 completeDrain();
359 } else {
360 advanceInst(fault);
361 }
362}
363
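// Pick the memory command for a request (plain read/write, LL/SC, or swap)
// and allocate the corresponding packet.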
364void
365TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
366{
367 MemCmd cmd;
368 if (read) {
369 cmd = MemCmd::ReadReq;
370 if (req->isLLSC())
371 cmd = MemCmd::LoadLockedReq;
372 } else {
373 cmd = MemCmd::WriteReq;
374 if (req->isLLSC()) {
375 cmd = MemCmd::StoreCondReq;
376 } else if (req->isSwap()) {
377 cmd = MemCmd::SwapReq;
378 }
379 }
380 pkt = new Packet(req, cmd, Packet::Broadcast);
381}
382
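// Build the two fragment packets for a split access plus a "main" packet
// that owns the data buffer; the sender-state objects link each fragment
// back to the main packet so the responses can be recombined in
// completeDataAccess().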
383void
384TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
385 RequestPtr req1, RequestPtr req2, RequestPtr req,
386 uint8_t *data, bool read)
387{
388 pkt1 = pkt2 = NULL;
389
390 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
391
392 if (req->getFlags().isSet(Request::NO_ACCESS)) {
393 buildPacket(pkt1, req, read);
394 return;
395 }
396
397 buildPacket(pkt1, req1, read);
398 buildPacket(pkt2, req2, read);
399
400 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
401 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
402 Packet::Broadcast);
403
404 pkt->dataDynamic<uint8_t>(data);
405 pkt1->dataStatic<uint8_t>(data);
406 pkt2->dataStatic<uint8_t>(data + req1->getSize());
407
408 SplitMainSenderState * main_send_state = new SplitMainSenderState;
409 pkt->senderState = main_send_state;
410 main_send_state->fragments[0] = pkt1;
411 main_send_state->fragments[1] = pkt2;
412 main_send_state->outstanding = 2;
413 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
414 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
415}
416
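// Initiate a timing load: construct the request, split it if it crosses a
// cache-block boundary, and kick off the DTB translation(s).  The dcache
// access itself is issued later from finishTranslation().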
417template <class T>
418Fault
419TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
420{
421 Fault fault;
422 const int asid = 0;
423 const ThreadID tid = 0;
424 const Addr pc = thread->readPC();
425 unsigned block_size = dcachePort.peerBlockSize();
426 int data_size = sizeof(T);
427 BaseTLB::Mode mode = BaseTLB::Read;
428
429 if (traceData) {
430 traceData->setAddr(addr);
431 }
432
433 RequestPtr req = new Request(asid, addr, data_size,
434 flags, pc, _cpuId, tid);
435
436 Addr split_addr = roundDown(addr + data_size - 1, block_size);
437 assert(split_addr <= addr || split_addr - addr < block_size);
438
439 _status = DTBWaitResponse;
440 if (split_addr > addr) {
441 RequestPtr req1, req2;
442 assert(!req->isLLSC() && !req->isSwap());
443 req->splitOnVaddr(split_addr, req1, req2);
444
445 WholeTranslationState *state =
446 new WholeTranslationState(req, req1, req2, (uint8_t *)(new T),
447 NULL, mode);
448 DataTranslation<TimingSimpleCPU> *trans1 =
449 new DataTranslation<TimingSimpleCPU>(this, state, 0);
450 DataTranslation<TimingSimpleCPU> *trans2 =
451 new DataTranslation<TimingSimpleCPU>(this, state, 1);
452
453 thread->dtb->translateTiming(req1, tc, trans1, mode);
454 thread->dtb->translateTiming(req2, tc, trans2, mode);
455 } else {
456 WholeTranslationState *state =
457 new WholeTranslationState(req, (uint8_t *)(new T), NULL, mode);
458 DataTranslation<TimingSimpleCPU> *translation
459 = new DataTranslation<TimingSimpleCPU>(this, state);
460 thread->dtb->translateTiming(req, tc, translation, mode);
461 }
462
463 return NoFault;
464}
465
466#ifndef DOXYGEN_SHOULD_SKIP_THIS
467
468template
469Fault
470TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
471
472template
473Fault
474TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
475
476template
477Fault
478TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
479
480template
481Fault
482TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
483
484template
485Fault
486TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
487
488template
489Fault
490TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
491
492#endif //DOXYGEN_SHOULD_SKIP_THIS
493
494template<>
495Fault
496TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
497{
498 return read(addr, *(uint64_t*)&data, flags);
499}
500
501template<>
502Fault
503TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
504{
505 return read(addr, *(uint32_t*)&data, flags);
506}
507
508template<>
509Fault
510TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
511{
512 return read(addr, (uint32_t&)data, flags);
513}
514
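// Counterpart of handleReadPacket() for stores: send the packet currently
// held in dcache_pkt, handling memory-mapped IPR writes locally and
// falling back to DcacheRetry if the port refuses the send.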
515bool
516TimingSimpleCPU::handleWritePacket()
517{
518 RequestPtr req = dcache_pkt->req;
519 if (req->isMmapedIpr()) {
520 Tick delay;
521 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
522 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
523 _status = DcacheWaitResponse;
524 dcache_pkt = NULL;
525 } else if (!dcachePort.sendTiming(dcache_pkt)) {
526 _status = DcacheRetry;
527 } else {
528 _status = DcacheWaitResponse;
529 // memory system takes ownership of packet
530 dcache_pkt = NULL;
531 }
532 return dcache_pkt == NULL;
533}
534
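// Initiate a timing store: copy the data into a heap buffer in guest byte
// order, construct (and possibly split) the request, and start the DTB
// translation(s); the write is sent from finishTranslation().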
535template <class T>
536Fault
537TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
538{
539 const int asid = 0;
540 const ThreadID tid = 0;
541 const Addr pc = thread->readPC();
542 unsigned block_size = dcachePort.peerBlockSize();
543 int data_size = sizeof(T);
544 BaseTLB::Mode mode = BaseTLB::Write;
545
546 if (traceData) {
547 traceData->setAddr(addr);
548 traceData->setData(data);
549 }
550
551 RequestPtr req = new Request(asid, addr, data_size,
552 flags, pc, _cpuId, tid);
553
554 Addr split_addr = roundDown(addr + data_size - 1, block_size);
555 assert(split_addr <= addr || split_addr - addr < block_size);
556
557 T *dataP = new T;
558 *dataP = TheISA::htog(data);
559 _status = DTBWaitResponse;
560 if (split_addr > addr) {
561 RequestPtr req1, req2;
562 assert(!req->isLLSC() && !req->isSwap());
563 req->splitOnVaddr(split_addr, req1, req2);
564
565 WholeTranslationState *state =
566 new WholeTranslationState(req, req1, req2, (uint8_t *)dataP,
567 res, mode);
568 DataTranslation<TimingSimpleCPU> *trans1 =
569 new DataTranslation<TimingSimpleCPU>(this, state, 0);
570 DataTranslation<TimingSimpleCPU> *trans2 =
571 new DataTranslation<TimingSimpleCPU>(this, state, 1);
572
573 thread->dtb->translateTiming(req1, tc, trans1, mode);
574 thread->dtb->translateTiming(req2, tc, trans2, mode);
575 } else {
576 WholeTranslationState *state =
577 new WholeTranslationState(req, (uint8_t *)dataP, res, mode);
578 DataTranslation<TimingSimpleCPU> *translation =
579 new DataTranslation<TimingSimpleCPU>(this, state);
580 thread->dtb->translateTiming(req, tc, translation, mode);
581 }
582
583 // Translation faults will be returned via finishTranslation()
584 return NoFault;
585}
586
587
588#ifndef DOXYGEN_SHOULD_SKIP_THIS
589template
590Fault
591TimingSimpleCPU::write(Twin32_t data, Addr addr,
592 unsigned flags, uint64_t *res);
593
594template
595Fault
596TimingSimpleCPU::write(Twin64_t data, Addr addr,
597 unsigned flags, uint64_t *res);
598
599template
600Fault
601TimingSimpleCPU::write(uint64_t data, Addr addr,
602 unsigned flags, uint64_t *res);
603
604template
605Fault
606TimingSimpleCPU::write(uint32_t data, Addr addr,
607 unsigned flags, uint64_t *res);
608
609template
610Fault
611TimingSimpleCPU::write(uint16_t data, Addr addr,
612 unsigned flags, uint64_t *res);
613
614template
615Fault
616TimingSimpleCPU::write(uint8_t data, Addr addr,
617 unsigned flags, uint64_t *res);
618
619#endif //DOXYGEN_SHOULD_SKIP_THIS
620
621template<>
622Fault
623TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
624{
625 return write(*(uint64_t*)&data, addr, flags, res);
626}
627
628template<>
629Fault
630TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
631{
632 return write(*(uint32_t*)&data, addr, flags, res);
633}
634
635
636template<>
637Fault
638TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
639{
640 return write((uint32_t)data, addr, flags, res);
641}
642
643
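// Callback invoked by DataTranslation once every piece of the translation
// has completed.  On a fault the buffer and requests are released and
// translationFault() takes over; otherwise the access is sent to the
// dcache via sendData() or sendSplitData().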
644void
645TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
646{
647 _status = Running;
648
649 if (state->getFault() != NoFault) {
650 if (state->isPrefetch()) {
651 state->setNoFault();
652 }
653 delete state->data;
654 state->deleteReqs();
655 translationFault(state->getFault());
656 } else {
657 if (!state->isSplit) {
658 sendData(state->mainReq, state->data, state->res,
659 state->mode == BaseTLB::Read);
660 } else {
661 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
662 state->data, state->mode == BaseTLB::Read);
663 }
664 }
665
666 delete state;
667}
668
669
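// Start fetching the next instruction.  For a normal macro-op this means
// building an ifetch request and starting the ITB translation; microcode
// (from the ROM or a macro-op in progress) skips the icache and goes
// straight to completeIfetch(NULL).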
670void
671TimingSimpleCPU::fetch()
672{
673 DPRINTF(SimpleCPU, "Fetch\n");
674
675 if (!curStaticInst || !curStaticInst->isDelayedCommit())
676 checkForInterrupts();
677
678 checkPcEventQueue();
679
680 bool fromRom = isRomMicroPC(thread->readMicroPC());
681
682 if (!fromRom && !curMacroStaticInst) {
683 Request *ifetch_req = new Request();
684 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
685 setupFetchRequest(ifetch_req);
686 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
687 BaseTLB::Execute);
688 } else {
689 _status = IcacheWaitResponse;
690 completeIfetch(NULL);
691
692 numCycles += tickToCycles(curTick - previousTick);
693 previousTick = curTick;
694 }
695}
696
697
698void
699TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
700{
701 if (fault == NoFault) {
702 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
703 ifetch_pkt->dataStatic(&inst);
704
705 if (!icachePort.sendTiming(ifetch_pkt)) {
706 // Need to wait for retry
707 _status = IcacheRetry;
708 } else {
709 // Need to wait for cache to respond
710 _status = IcacheWaitResponse;
711 // ownership of packet transferred to memory system
712 ifetch_pkt = NULL;
713 }
714 } else {
715 delete req;
716 // fetch fault: advance directly to next instruction (fault handler)
717 advanceInst(fault);
718 }
719
720 numCycles += tickToCycles(curTick - previousTick);
721 previousTick = curTick;
722}
723
724
725void
726TimingSimpleCPU::advanceInst(Fault fault)
727{
728 if (fault != NoFault || !stayAtPC)
729 advancePC(fault);
730
731 if (_status == Running) {
732 // kick off fetch of next instruction... callback from icache
733 // response will cause that instruction to be executed,
734 // keeping the CPU running.
735 fetch();
736 }
737}
738
739
740void
741TimingSimpleCPU::completeIfetch(PacketPtr pkt)
742{
743 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
744
745 // received a response from the icache: execute the received
746 // instruction
747
748 assert(!pkt || !pkt->isError());
749 assert(_status == IcacheWaitResponse);
750
751 _status = Running;
752
753 numCycles += tickToCycles(curTick - previousTick);
754 previousTick = curTick;
755
756 if (getState() == SimObject::Draining) {
757 if (pkt) {
758 delete pkt->req;
759 delete pkt;
760 }
761
762 completeDrain();
763 return;
764 }
765
766 preExecute();
767 if (curStaticInst &&
768 curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
769 // load or store: just send to dcache
770 Fault fault = curStaticInst->initiateAcc(this, traceData);
771 if (_status != Running) {
772 // instruction will complete in dcache response callback
773 assert(_status == DcacheWaitResponse ||
774 _status == DcacheRetry || _status == DTBWaitResponse);
775 assert(fault == NoFault);
776 } else {
777 if (fault != NoFault && traceData) {
778 // If there was a fault, we shouldn't trace this instruction.
779 delete traceData;
780 traceData = NULL;
781 }
782
783 postExecute();
784 // @todo remove me after debugging with legion done
785 if (curStaticInst && (!curStaticInst->isMicroop() ||
786 curStaticInst->isFirstMicroop()))
787 instCnt++;
788 advanceInst(fault);
789 }
790 } else if (curStaticInst) {
791 // non-memory instruction: execute completely now
792 Fault fault = curStaticInst->execute(this, traceData);
793
794 // keep an instruction count
795 if (fault == NoFault)
796 countInst();
797 else if (traceData) {
798 // If there was a fault, we shouldn't trace this instruction.
799 delete traceData;
800 traceData = NULL;
801 }
802
803 postExecute();
804 // @todo remove me after debugging with legion done
805 if (curStaticInst && (!curStaticInst->isMicroop() ||
806 curStaticInst->isFirstMicroop()))
807 instCnt++;
808 advanceInst(fault);
809 } else {
810 advanceInst(NoFault);
811 }
812
813 if (pkt) {
814 delete pkt->req;
815 delete pkt;
816 }
817}
818
819void
820TimingSimpleCPU::IcachePort::ITickEvent::process()
821{
822 cpu->completeIfetch(pkt);
823}
824
825bool
826TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
827{
828 if (pkt->isResponse() && !pkt->wasNacked()) {
829 // delay processing of returned data until next CPU clock edge
830 Tick next_tick = cpu->nextCycle(curTick);
831
832 if (next_tick == curTick)
833 cpu->completeIfetch(pkt);
834 else
835 tickEvent.schedule(pkt, next_tick);
836
837 return true;
838 }
839 else if (pkt->wasNacked()) {
840 assert(cpu->_status == IcacheWaitResponse);
841 pkt->reinitNacked();
842 if (!sendTiming(pkt)) {
843 cpu->_status = IcacheRetry;
844 cpu->ifetch_pkt = pkt;
845 }
846 }
847 //Snooping a Coherence Request, do nothing
848 return true;
849}
850
851void
852TimingSimpleCPU::IcachePort::recvRetry()
853{
854 // we shouldn't get a retry unless we have a packet that we're
855 // waiting to transmit
856 assert(cpu->ifetch_pkt != NULL);
857 assert(cpu->_status == IcacheRetry);
858 PacketPtr tmp = cpu->ifetch_pkt;
859 if (sendTiming(tmp)) {
860 cpu->_status = IcacheWaitResponse;
861 cpu->ifetch_pkt = NULL;
862 }
863}
864
865void
866TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
867{
868 // received a response from the dcache: complete the load or store
869 // instruction
870 assert(!pkt->isError());
871 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
872 pkt->req->getFlags().isSet(Request::NO_ACCESS));
873
874 numCycles += tickToCycles(curTick - previousTick);
875 previousTick = curTick;
876
877 if (pkt->senderState) {
878 SplitFragmentSenderState * send_state =
879 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
880 assert(send_state);
881 delete pkt->req;
882 delete pkt;
883 PacketPtr big_pkt = send_state->bigPkt;
884 delete send_state;
885
886 SplitMainSenderState * main_send_state =
887 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
888 assert(main_send_state);
889 // Record the fact that this packet is no longer outstanding.
890 assert(main_send_state->outstanding != 0);
891 main_send_state->outstanding--;
892
893 if (main_send_state->outstanding) {
894 return;
895 } else {
896 delete main_send_state;
897 big_pkt->senderState = NULL;
898 pkt = big_pkt;
899 }
900 }
901
902 _status = Running;
903
904 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
905
906 // keep an instruction count
907 if (fault == NoFault)
908 countInst();
909 else if (traceData) {
910 // If there was a fault, we shouldn't trace this instruction.
911 delete traceData;
912 traceData = NULL;
913 }
914
915 // the locked flag may be cleared on the response packet, so check
916 // pkt->req and not pkt to see if it was a load-locked
917 if (pkt->isRead() && pkt->req->isLLSC()) {
918 TheISA::handleLockedRead(thread, pkt->req);
919 }
920
921 delete pkt->req;
922 delete pkt;
923
924 postExecute();
925
926 if (getState() == SimObject::Draining) {
927 advancePC(fault);
928 completeDrain();
929
930 return;
931 }
932
933 advanceInst(fault);
934}
935
936
937void
938TimingSimpleCPU::completeDrain()
939{
940 DPRINTF(Config, "Done draining\n");
941 changeState(SimObject::Drained);
942 drainEvent->process();
943}
944
945void
946TimingSimpleCPU::DcachePort::setPeer(Port *port)
947{
948 Port::setPeer(port);
949
950#if FULL_SYSTEM
951 // Update the ThreadContext's memory ports (Functional/Virtual
952 // Ports)
953 cpu->tcBase()->connectMemPorts(cpu->tcBase());
954#endif
955}
956
957bool
958TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
959{
960 if (pkt->isResponse() && !pkt->wasNacked()) {
961 // delay processing of returned data until next CPU clock edge
962 Tick next_tick = cpu->nextCycle(curTick);
963
964 if (next_tick == curTick) {
965 cpu->completeDataAccess(pkt);
966 } else {
967 tickEvent.schedule(pkt, next_tick);
968 }
969
970 return true;
971 }
972 else if (pkt->wasNacked()) {
973 assert(cpu->_status == DcacheWaitResponse);
974 pkt->reinitNacked();
975 if (!sendTiming(pkt)) {
976 cpu->_status = DcacheRetry;
977 cpu->dcache_pkt = pkt;
978 }
979 }
980 //Snooping a Coherence Request, do nothing
981 return true;
982}
983
984void
985TimingSimpleCPU::DcachePort::DTickEvent::process()
986{
987 cpu->completeDataAccess(pkt);
988}
989
990void
991TimingSimpleCPU::DcachePort::recvRetry()
992{
993 // we shouldn't get a retry unless we have a packet that we're
994 // waiting to transmit
995 assert(cpu->dcache_pkt != NULL);
996 assert(cpu->_status == DcacheRetry);
997 PacketPtr tmp = cpu->dcache_pkt;
998 if (tmp->senderState) {
999 // This is a packet from a split access.
1000 SplitFragmentSenderState * send_state =
1001 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1002 assert(send_state);
1003 PacketPtr big_pkt = send_state->bigPkt;
1004
1005 SplitMainSenderState * main_send_state =
1006 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1007 assert(main_send_state);
1008
1009 if (sendTiming(tmp)) {
1010 // If we were able to send without retrying, record that fact
1011 // and try sending the other fragment.
1012 send_state->clearFromParent();
1013 int other_index = main_send_state->getPendingFragment();
1014 if (other_index > 0) {
1015 tmp = main_send_state->fragments[other_index];
1016 cpu->dcache_pkt = tmp;
1017 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1018 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1019 main_send_state->fragments[other_index] = NULL;
1020 }
1021 } else {
1022 cpu->_status = DcacheWaitResponse;
1023 // memory system takes ownership of packet
1024 cpu->dcache_pkt = NULL;
1025 }
1026 }
1027 } else if (sendTiming(tmp)) {
1028 cpu->_status = DcacheWaitResponse;
1029 // memory system takes ownership of packet
1030 cpu->dcache_pkt = NULL;
1031 }
1032}
1033
1034TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
1035 Tick t)
1036 : pkt(_pkt), cpu(_cpu)
1037{
1038 cpu->schedule(this, t);
1039}
1040
1041void
1042TimingSimpleCPU::IprEvent::process()
1043{
1044 cpu->completeDataAccess(pkt);
1045}
1046
1047const char *
1048TimingSimpleCPU::IprEvent::description() const
1049{
1050 return "Timing Simple CPU Delay IPR event";
1051}
1052
1053
1054void
1055TimingSimpleCPU::printAddr(Addr a)
1056{
1057 dcachePort.printAddr(a);
1058}
1059
1060
1061////////////////////////////////////////////////////////////////////////
1062//
1063// TimingSimpleCPU Simulation Object
1064//
1065TimingSimpleCPU *
1066TimingSimpleCPUParams::create()
1067{
1068 numThreads = 1;
1069#if !FULL_SYSTEM
1070 if (workload.size() != 1)
1071 panic("only one workload allowed");
1072#endif
1073 return new TimingSimpleCPU(this);
1074}