/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
TimingSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        for (int i = 0; i < threadContexts.size(); ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, _cpuId);
        }
    }
}

void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    _status = Idle;

    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

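// Drain support: drain() is called when the simulator wants to reach a
// quiescent state (for example before a checkpoint or CPU switch).  If the
// CPU is idle or already sitting at a clean instruction boundary it returns
// 0 (nothing to drain); otherwise it remembers the DrainManager and returns
// 1, signalling completion later from tryCompleteDrain().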
unsigned int
TimingSimpleCPU::drain(DrainManager *drain_manager)
{
    assert(!drainManager);
    if (switchedOut())
        return 0;

    if (_status == Idle ||
        (_status == BaseSimpleCPU::Running && isDrained())) {
        DPRINTF(Drain, "No need to drain.\n");
        return 0;
    } else {
        drainManager = drain_manager;
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());

        // The fetch event can become descheduled if a drain didn't
        // succeed on the first attempt. We need to reschedule it if
        // the CPU is waiting for a microcode routine to complete.
        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
            schedule(fetchEvent, clockEdge());

        return 1;
    }
}

void
TimingSimpleCPU::drainResume()
{
    assert(!fetchEvent.scheduled());
    assert(!drainManager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The timing CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(fetchEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
    } else {
        _status = BaseSimpleCPU::Idle;
    }
}

bool
TimingSimpleCPU::tryCompleteDrain()
{
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}

void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    numCycles += curCycle() - previousCycle;
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    previousCycle = curCycle();
}

void
TimingSimpleCPU::verifyMemoryMode() const
{
    if (!system->isTimingMode()) {
        fatal("The timing CPU requires the memory system to be in "
              "'timing' mode.\n");
    }
}

void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}


void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

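// Try to issue a timing read on the data port.  Memory-mapped IPR accesses
// are handled locally by handleIprRead and completed later via an IprEvent;
// ordinary requests are sent to the d-cache port, and if the port is busy
// the packet is held in dcache_pkt until recvRetry().  Returns true if the
// CPU no longer owns the packet (it was sent or completed).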
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // set to false below to suppress the cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

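// Issue an access that straddles a cache-line boundary as two fragment
// packets.  Each fragment carries a SplitFragmentSenderState pointing back
// to the main packet, whose SplitMainSenderState tracks how many fragments
// are still outstanding (see completeDataAccess and DcachePort::recvRetry).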
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    advanceInst(fault);
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd);
}

void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(),
                 dataMasterId());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

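// Initiate a timing load.  A request is built for the access and handed to
// the data TLB for timing translation; if the access crosses a cache-line
// boundary it is first split on the line boundary into two requests.  The
// actual cache access is started later from finishTranslation(), so this
// always returns NoFault here.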
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU *> *translation
            = new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

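// Initiate a timing store.  The store data is copied into a new buffer that
// travels with the translation state, and the request is translated (and
// split across a cache-line boundary if necessary) just like readMem above.
// Translation faults are reported later through finishTranslation().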
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = cacheLineSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, dataMasterId(), pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *trans1 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
        DataTranslation<TimingSimpleCPU *> *trans2 =
            new DataTranslation<TimingSimpleCPU *>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU *> *translation =
            new DataTranslation<TimingSimpleCPU *>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


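// Callback invoked by the DataTranslation objects created in readMem() and
// writeMem() once address translation has finished.  On a fault the access
// is abandoned and handed to translationFault(); otherwise the access
// proceeds to the cache via sendData() or sendSplitData().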
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = BaseSimpleCPU::Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


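// Start fetching the next instruction.  Interrupts and PC events are
// checked first; a fetch request is only sent to the ITB/i-cache when we
// are not executing out of the microcode ROM and not in the middle of a
// macro-op, otherwise completeIfetch(NULL) is called directly to keep
// executing microops.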
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = BaseSimpleCPU::Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += curCycle() - previousCycle;
        previousCycle = curCycle();
    }
}


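// Called once the instruction address translation started in fetch() has
// finished (via fetchTranslation).  On success the fetch packet is sent to
// the i-cache, or held for a retry if the port is busy; on a fault the
// access is skipped and advanceInst() starts the fault handler.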
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTimingReq(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = BaseSimpleCPU::Running;
        advanceInst(fault);
    }

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();
}


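// Advance to the next instruction.  A fault redirects the PC through
// advancePC(fault) and reschedules the fetch event while the CPU sits in
// the Faulting state; otherwise the PC is advanced (unless stayAtPC is set)
// and, if the CPU is still Running and not draining, the next fetch starts.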
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, clockEdge(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (tryCompleteDrain())
        return;

    if (_status == BaseSimpleCPU::Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


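// Handle an instruction fetch completing (pkt may be NULL when executing
// from the microcode ROM or continuing a macro-op).  Memory references are
// only initiated here and finish later in completeDataAccess(); all other
// instructions execute to completion before advancing.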
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick())
        cpu->completeIfetch(pkt);
    else
        tickEvent.schedule(pkt, next_tick);

    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTimingReq(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

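// Handle a data access completing.  For split accesses the two fragment
// responses are collected back into the original packet before the
// instruction's completeAcc() runs; handleLockedRead is also invoked for
// load-locked requests to update the locked-address state.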
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}

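// Responses from the d-cache are not processed immediately but deferred to
// the next CPU clock edge.  If a response is already queued for that edge
// (possible with a split access and a cache faster than the CPU), this one
// is refused and a retry is scheduled instead.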
bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
    // delay processing of returned data until next CPU clock edge
    Tick next_tick = cpu->clockEdge();

    if (next_tick == curTick()) {
        cpu->completeDataAccess(pkt);
    } else {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(pkt, next_tick);
        } else {
            // In the case of a split transaction and a cache that is
            // faster than a CPU we could get two responses before
            // next_tick expires
            if (!retryEvent.scheduled())
                cpu->schedule(retryEvent, next_tick);
            return false;
        }
    }

    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

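// The d-cache is ready for the packet we previously failed to send.  For a
// split access, successfully sending one fragment immediately triggers an
// attempt to send the other pending fragment as well.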
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}