// NOTE: stray diff-viewer navigation text ("Deleted Added", "sdiff udiff ...",
// "full compact") was captured along with this source; it is not part of the
// file and has been neutralized into this comment.
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
void
TimingSimpleCPU::init()
{
    // One-time setup after construction: base-class init, memory
    // proxies, and (in full system) ISA-level CPU state.
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    // In full-system mode set up ISA-specific CPU state (including the
    // PC) for every thread context, unless this CPU starts switched
    // out, in which case the CPU it takes over from owns that state.
    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}
80
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    // Stash the packet so process() can hand it back to the CPU when
    // the event fires at tick t.
    pkt = _pkt;
    cpu->schedule(this, t);
}
87
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    // The CPU starts idle; activateContext() moves it to Running.
    _status = Idle;

    system->totalNumInsts = 0;
}
97
98
// Nothing to tear down explicitly; members clean up via their own
// destructors.
TimingSimpleCPU::~TimingSimpleCPU()
{
}
102
/**
 * Start draining the CPU.
 *
 * @return 0 if the CPU is already drained (switched out, idle, or
 *         running but at an instruction boundary), 1 if draining is
 *         still in progress and the manager will be signalled later.
 */
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        // Remember the manager so tryCompleteDrain() can signal it
        // once the in-flight access/translation completes.
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}
127
void
TimingSimpleCPU::drainResume()
{
    // Resume simulation after a successful drain.
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    // A switched-out CPU has nothing to restart.
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    // The memory system must be back in timing mode before we run.
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        // Kick fetching off again on the next cycle.
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}
152
/**
 * If a drain is pending and the CPU has reached a drained state,
 * signal the drain manager.
 *
 * @return true if the drain was completed (the caller should stop
 *         making forward progress), false otherwise.
 */
bool
TimingSimpleCPU::tryCompleteDrain()
{
    // No drain request outstanding.
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}
169
void
TimingSimpleCPU::switchOut()
{
    // Hand execution over to another CPU model. The CPU must be fully
    // drained: no pending fetch, at a macro-op boundary.
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    // Account for cycles elapsed since the last statistics update.
    numCycles += curCycle() - previousCycle;
}
182
183
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // Restart cycle accounting from the point of the takeover.
    previousCycle = curCycle();
}
191
192void
193TimingSimpleCPU::verifyMemoryMode() const
194{
195 if (!system->isTimingMode()) {
196 fatal("The timing CPU requires the memory system to be in "
197 "'timing' mode.\n");
198 }
199}
200
void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    // Wake the (single) thread context and start fetching after
    // `delay` cycles.
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // Only one hardware thread is supported.
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction = 1;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}
217
218
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    // Put the (single) thread context to sleep; no event needs to be
    // descheduled because fetch is only re-initiated while Running.
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    // Already idle: nothing to do.
    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction = 0;
    _status = Idle;
}
238
239bool
240TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
241{
242 RequestPtr req = pkt->req;
243 if (req->isMmappedIpr()) {
244 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
245 new IprEvent(pkt, this, clockEdge(delay));
246 _status = DcacheWaitResponse;
247 dcache_pkt = NULL;
248 } else if (!dcachePort.sendTimingReq(pkt)) {
249 _status = DcacheRetry;
250 dcache_pkt = pkt;
251 } else {
252 _status = DcacheWaitResponse;
253 // memory system takes ownership of packet
254 dcache_pkt = NULL;
255 }
256 return dcache_pkt == NULL;
257}
258
/**
 * Build and issue a (non-split) data access once translation has
 * completed. Takes ownership of `data`, which is attached to the
 * packet as a dynamic array.
 */
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required (e.g. a suppressed access):
        // synthesize the response locally.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            // Store-conditional: the ISA decides whether the access
            // proceeds based on the lock state.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            // Conditional swap carries its comparison value in the
            // request's extra data.
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Failed store-conditional: complete immediately without
            // touching memory.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
291
/**
 * Issue a data access that straddles a cache-line boundary as two
 * fragment packets (req1/req2) tied together by sender state on the
 * main request's packet.
 */
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: respond with the first fragment.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        // Send the fragments in order; clearFromParent() marks a
        // fragment as no longer pending in the main sender state.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        // Writes go through dcache_pkt so a failed send can be
        // retried from recvRetry().
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}
328
/**
 * Handle a fault reported by address translation: account cycles,
 * discard any trace record for the faulting instruction, and advance
 * so the fault can be invoked.
 */
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}
347
348void
349TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
350{
351 MemCmd cmd;
352 if (read) {
353 cmd = MemCmd::ReadReq;
354 if (req->isLLSC())
355 cmd = MemCmd::LoadLockedReq;
356 } else {
357 cmd = MemCmd::WriteReq;
358 if (req->isLLSC()) {
359 cmd = MemCmd::StoreCondReq;
360 } else if (req->isSwap()) {
361 cmd = MemCmd::SwapReq;
362 }
363 }
364 pkt = new Packet(req, cmd);
365}
366
/**
 * Build the two fragment packets for a line-crossing access plus the
 * "big" packet that represents the whole access, wiring them together
 * with SplitMainSenderState / SplitFragmentSenderState.
 */
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    // IPR accesses are never split across a line boundary.
    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No real access: a single packet on the main request suffices.
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the main request a physical address (from the first
    // fragment) so the big packet is well-formed.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    // The big packet owns the buffer; the fragments point into it.
    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    // Both fragments are outstanding until their responses arrive.
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
399
/**
 * Initiate a timing-mode read: build the request, split it if it
 * crosses a cache line, and start (possibly split) translation. The
 * access continues asynchronously via finishTranslation(); any
 * translation fault is delivered there, so this always returns
 * NoFault.
 */
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req  = new Request(asid, addr, size,
                                  flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    // split_addr > addr iff the access crosses into the next line.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        // LLSC and swap accesses must not straddle a line.
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        // WholeTranslationState collects both halves before the access
        // is issued; it owns the data buffer until then.
        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}
449
450bool
451TimingSimpleCPU::handleWritePacket()
452{
453 RequestPtr req = dcache_pkt->req;
454 if (req->isMmappedIpr()) {
455 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
456 new IprEvent(dcache_pkt, this, clockEdge(delay));
457 _status = DcacheWaitResponse;
458 dcache_pkt = NULL;
459 } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
460 _status = DcacheRetry;
461 } else {
462 _status = DcacheWaitResponse;
463 // memory system takes ownership of packet
464 dcache_pkt = NULL;
465 }
466 return dcache_pkt == NULL;
467}
468
/**
 * Initiate a timing-mode write: copy the caller's data into a fresh
 * buffer (the caller's buffer may not outlive the call), build the
 * request, split it if it crosses a cache line, and start translation.
 * The access continues asynchronously via finishTranslation().
 */
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // Private copy of the store data; ownership passes to the
    // translation state / packet.
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    req->taskId(taskId());

    // split_addr > addr iff the access crosses into the next line.
    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        // LLSC and swap accesses must not straddle a line.
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}
520
521
/**
 * Callback invoked when address translation (both halves, for a split
 * access) has completed: either report the fault or issue the access.
 * Always consumes `state`.
 */
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        // Faulting prefetches are squashed rather than faulted.
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        // The access never happens, so release its resources here.
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}
546
547
/**
 * Start fetching the next instruction: check for interrupts and PC
 * events, then either initiate an i-fetch translation or, for a
 * microcode/ROM micro-op, proceed straight to execution without a
 * memory access.
 */
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Don't take interrupts in the middle of a delayed-commit
    // instruction sequence.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while executing out of the microcode
    // ROM or within a macro-op.
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->taskId(taskId());
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        // Continues in sendFetch() once translation completes.
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // Fake a fetch response so completeIfetch() runs the micro-op.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}
582
583
/**
 * Callback from i-side translation: on success send the fetch packet
 * to the i-cache; on a fault skip the access and advance directly so
 * the fault handler can be invoked.
 */
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // Fetched bytes land directly in the CPU's `inst` buffer.
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}
614
615
/**
 * Move on to the next instruction: advance the PC (unless a multi-step
 * instruction pins it), honor pending drains, and re-initiate fetch if
 * the CPU is still running. A fault schedules a fetch for the handler
 * and parks the CPU in the Faulting state until it fires.
 */
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Already handling a fault; don't advance again.
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    // stayAtPC is set while a multi-part instruction is in flight.
    if (!stayAtPC)
        advancePC(fault);

    // A completed drain stops forward progress here.
    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}
644
645
/**
 * Handle an i-cache response (pkt may be NULL for microcode/macro-op
 * steps that needed no fetch): decode and either initiate a memory
 * access or execute the instruction to completion, then advance.
 */
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt)
        pkt->req->setAccessLatency();


    // Decode (sets curStaticInst) and do pre-execution bookkeeping.
    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            // Drop the trace record unless faulting execution tracing
            // was explicitly requested.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // Nothing decoded (e.g. still assembling a macro-op): just
        // move on.
        advanceInst(NoFault);
    }

    // The fetch packet and its request are done with.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
715
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deliver the delayed i-cache response to the CPU.
    cpu->completeIfetch(pkt);
}
721
722bool
723TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
724{
725 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
726 // delay processing of returned data until next CPU clock edge
727 Tick next_tick = cpu->clockEdge();
728
729 if (next_tick == curTick())
730 cpu->completeIfetch(pkt);
731 else
732 tickEvent.schedule(pkt, next_tick);
733
734 return true;
735}
736
737void
738TimingSimpleCPU::IcachePort::recvRetry()
739{
740 // we shouldn't get a retry unless we have a packet that we're
741 // waiting to transmit
742 assert(cpu->ifetch_pkt != NULL);
743 assert(cpu->_status == IcacheRetry);
744 PacketPtr tmp = cpu->ifetch_pkt;
745 if (sendTimingReq(tmp)) {
746 cpu->_status = IcacheWaitResponse;
747 cpu->ifetch_pkt = NULL;
748 }
749}
750
/**
 * Handle a d-cache response: for split accesses, fold fragment
 * responses back into the "big" packet and only proceed once both have
 * arrived; then complete the memory instruction and advance.
 */
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    pkt->req->setAccessLatency();
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This is a fragment of a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting for the other fragment.
            return;
        } else {
            // Both fragments done: complete using the big packet.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}
815
/**
 * Receive a d-cache response, aligning processing to the CPU's clock
 * edge. Returns false (forcing the cache to retry later) only when a
 * second response arrives while one is already queued for the same
 * edge.
 */
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}
839
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deliver the delayed d-cache response to the CPU.
    cpu->completeDataAccess(pkt);
}
845
/**
 * The d-cache asked us to retry a previously rejected request. Resend
 * the parked packet; for split accesses, also try to issue the other
 * fragment once the first one is accepted.
 */
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // The other fragment still needs to be issued.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
889
// Schedules itself at tick t to model the latency of a memory-mapped
// IPR access; process() then completes the data access.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}
896
void
TimingSimpleCPU::IprEvent::process()
{
    // The modeled IPR latency has elapsed: complete the access.
    cpu->completeDataAccess(pkt);
}
902
// Human-readable event name used in event-queue debug output.
const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}
908
909
// Debug helper: delegate address printing to the d-cache port.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}
915
916
917////////////////////////////////////////////////////////////////////////
918//
919// TimingSimpleCPU Simulation Object
920//
// Param-object factory: this CPU model is single-threaded, and in
// syscall-emulation mode it requires exactly one workload process.
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}