timing.cc (9524:d6ffa982a68b) timing.cc (9648:f10eb34e3e38)
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
// Post-construction initialisation: run BaseCPU::init(), wire up the
// thread context's memory proxies, and, in full-system mode when this
// CPU is not switched out, initialise each thread context's
// architectural state (including the PC) via TheISA::initCPU().
 64void
 65TimingSimpleCPU::init()
 66{
 67    BaseCPU::init();
 68
 69    // Initialise the ThreadContext's memory proxies
 70    tcBase()->initMemProxies(tcBase());
 71
 72    if (FullSystem && !params()->switched_out) {
 73        for (int i = 0; i < threadContexts.size(); ++i) {
 74            ThreadContext *tc = threadContexts[i];
 75            // initialize CPU, including PC
 76            TheISA::initCPU(tc, _cpuId);
 77        }
 78    }
 79}
80
// Attach the packet this event will deliver and schedule the event on
// the owning CPU's event queue at tick t.
 81void
 82TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
 83{
 84    pkt = _pkt;
 85    cpu->schedule(this, t);
 86}
87
// Construct the CPU in the Idle state; no fetch event is scheduled
// until activateContext() wakes the thread. Note this also zeroes the
// system-wide instruction counter.
 88TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
 89    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
 90      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
 91      fetchEvent(this), drainManager(NULL)
 92{
 93    _status = Idle;
 94
 95    system->totalNumInsts = 0;
 96}
97
98
// Nothing to release explicitly; events and ports are members that
// clean themselves up.
 99TimingSimpleCPU::~TimingSimpleCPU()
100{
101}
102
103unsigned int
104TimingSimpleCPU::drain(DrainManager *drain_manager)
105{
106 assert(!drainManager);
107 if (switchedOut())
108 return 0;
109
110 if (_status == Idle ||
111 (_status == BaseSimpleCPU::Running && isDrained())) {
112 assert(!fetchEvent.scheduled());
113 DPRINTF(Drain, "No need to drain.\n");
114 return 0;
115 } else {
116 drainManager = drain_manager;
117 DPRINTF(Drain, "Requesting drain: %s\n", pcState());
118
119 // The fetch event can become descheduled if a drain didn't
120 // succeed on the first attempt. We need to reschedule it if
121 // the CPU is waiting for a microcode routine to complete.
122 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/Drain.hh"
52#include "debug/ExecFaulting.hh"
53#include "debug/SimpleCPU.hh"
54#include "mem/packet.hh"
55#include "mem/packet_access.hh"
56#include "params/TimingSimpleCPU.hh"
57#include "sim/faults.hh"
58#include "sim/full_system.hh"
59#include "sim/system.hh"
60
61using namespace std;
62using namespace TheISA;
63
// Post-construction initialisation: run BaseCPU::init(), wire up the
// thread context's memory proxies, and, in full-system mode when this
// CPU is not switched out, initialise each thread context's
// architectural state (including the PC) via TheISA::initCPU().
 64void
 65TimingSimpleCPU::init()
 66{
 67    BaseCPU::init();
 68
 69    // Initialise the ThreadContext's memory proxies
 70    tcBase()->initMemProxies(tcBase());
 71
 72    if (FullSystem && !params()->switched_out) {
 73        for (int i = 0; i < threadContexts.size(); ++i) {
 74            ThreadContext *tc = threadContexts[i];
 75            // initialize CPU, including PC
 76            TheISA::initCPU(tc, _cpuId);
 77        }
 78    }
 79}
80
// Attach the packet this event will deliver and schedule the event on
// the owning CPU's event queue at tick t.
 81void
 82TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
 83{
 84    pkt = _pkt;
 85    cpu->schedule(this, t);
 86}
87
// Construct the CPU in the Idle state; no fetch event is scheduled
// until activateContext() wakes the thread. Note this also zeroes the
// system-wide instruction counter.
 88TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
 89    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
 90      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
 91      fetchEvent(this), drainManager(NULL)
 92{
 93    _status = Idle;
 94
 95    system->totalNumInsts = 0;
 96}
97
98
// Nothing to release explicitly; events and ports are members that
// clean themselves up.
 99TimingSimpleCPU::~TimingSimpleCPU()
100{
101}
102
// Start draining the CPU. Returns 0 when there is nothing to drain
// (switched out, idle, or already at a clean instruction boundary) and
// 1 when a drain is now pending; completion is later signalled through
// drainManager (see tryCompleteDrain()).
//
// NOTE(review): the listing carried both revisions of line 123 from
// the diff (9524: nextCycle(), 9648: clockEdge()); scheduling the same
// event twice would be invalid, so only the newer revision is kept.
103unsigned int
104TimingSimpleCPU::drain(DrainManager *drain_manager)
105{
106    assert(!drainManager);
107    if (switchedOut())
108        return 0;
109
110    if (_status == Idle ||
111        (_status == BaseSimpleCPU::Running && isDrained())) {
112        assert(!fetchEvent.scheduled());
113        DPRINTF(Drain, "No need to drain.\n");
114        return 0;
115    } else {
116        drainManager = drain_manager;
117        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
118
119        // The fetch event can become descheduled if a drain didn't
120        // succeed on the first attempt. We need to reschedule it if
121        // the CPU is waiting for a microcode routine to complete.
122        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
123            schedule(fetchEvent, clockEdge());
124
125        return 1;
126    }
127}
128
// Resume after a drain completes: sanity-check state, ensure the
// memory system is in timing mode, and restart fetch on the next cycle
// if the (single supported) thread is active; otherwise go Idle.
129void
130TimingSimpleCPU::drainResume()
131{
132    assert(!fetchEvent.scheduled());
133    assert(!drainManager);
134    if (switchedOut())
135        return;
136
137    DPRINTF(SimpleCPU, "Resume\n");
138    verifyMemoryMode();
139
140    assert(!threadContexts.empty());
141    if (threadContexts.size() > 1)
142        fatal("The timing CPU only supports one thread.\n");
143
144    if (thread->status() == ThreadContext::Active) {
145        schedule(fetchEvent, nextCycle());
146        _status = BaseSimpleCPU::Running;
147    } else {
148        _status = BaseSimpleCPU::Idle;
149    }
150}
151
// If a drain is pending and the CPU has reached a drained state
// (clean instruction boundary), notify the drain manager and clear the
// pending request. Returns true when the drain completed here.
152bool
153TimingSimpleCPU::tryCompleteDrain()
154{
155    if (!drainManager)
156        return false;
157
158    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
159    if (!isDrained())
160        return false;
161
162    DPRINTF(Drain, "CPU done draining, processing drain event\n");
163    drainManager->signalDrainDone();
164    drainManager = NULL;
165
166    return true;
167}
168
// Switch this CPU out: delegate to the base class, assert we are at a
// clean (macro-)instruction boundary, and fold the cycles elapsed
// since the last accounting point into numCycles.
169void
170TimingSimpleCPU::switchOut()
171{
172    BaseSimpleCPU::switchOut();
173
174    assert(!fetchEvent.scheduled());
175    assert(_status == BaseSimpleCPU::Running || _status == Idle);
176    assert(!stayAtPC);
177    assert(microPC() == 0);
178
179    numCycles += curCycle() - previousCycle;
180}
181
182
// Inherit state from the CPU being replaced and reset the
// cycle-accounting baseline to the current cycle.
183void
184TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
185{
186    BaseSimpleCPU::takeOverFrom(oldCPU);
187
188    previousCycle = curCycle();
189}
190
// This CPU model only works when the memory system is in timing mode;
// abort the simulation otherwise.
191void
192TimingSimpleCPU::verifyMemoryMode() const
193{
194    if (!system->isTimingMode()) {
195        fatal("The timing CPU requires the memory system to be in "
196              "'timing' mode.\n");
197    }
198}
199
// Wake up thread 0: transition Idle -> Running and schedule the first
// fetch 'delay' cycles from now.
200void
201TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
202{
203    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
204
205    assert(thread_num == 0);
206    assert(thread);
207
208    assert(_status == Idle);
209
210    notIdleFraction++;
211    _status = BaseSimpleCPU::Running;
212
213    // kick things off by initiating the fetch of the next instruction
214    schedule(fetchEvent, clockEdge(delay));
215}
216
217
// Suspend thread 0: drop to Idle so completeInst() stops initiating
// further fetches. No-op when already Idle.
218void
219TimingSimpleCPU::suspendContext(ThreadID thread_num)
220{
221    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
222
223    assert(thread_num == 0);
224    assert(thread);
225
226    if (_status == Idle)
227        return;
228
229    assert(_status == BaseSimpleCPU::Running);
230
231    // just change status to Idle... if status != Running,
232    // completeInst() will not initiate fetch of next instruction.
233
234    notIdleFraction--;
235    _status = Idle;
236}
237
// Try to issue a timing read packet. Memory-mapped IPR reads are
// serviced locally through a delayed IprEvent; otherwise the packet is
// sent to the dcache, and kept in dcache_pkt for a retry if the port
// is busy. Returns true once the packet has been handed off.
238bool
239TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
240{
241    RequestPtr req = pkt->req;
242    if (req->isMmappedIpr()) {
243        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
244        new IprEvent(pkt, this, clockEdge(delay));
245        _status = DcacheWaitResponse;
246        dcache_pkt = NULL;
247    } else if (!dcachePort.sendTimingReq(pkt)) {
248        _status = DcacheRetry;
249        dcache_pkt = pkt;
250    } else {
251        _status = DcacheWaitResponse;
252        // memory system takes ownership of packet
253        dcache_pkt = NULL;
254    }
255    return dcache_pkt == NULL;
256}
257
// Issue a (non-split) data access once translation has finished.
// NO_ACCESS requests are completed immediately without touching
// memory; reads go through handleReadPacket(); writes may be
// suppressed by a failing store-conditional (handleLockedWrite), in
// which case the access completes locally instead of going to cache.
258void
259TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
260                          bool read)
261{
262    PacketPtr pkt;
263    buildPacket(pkt, req, read);
264    pkt->dataDynamicArray<uint8_t>(data);
265    if (req->getFlags().isSet(Request::NO_ACCESS)) {
266        assert(!dcache_pkt);
267        pkt->makeResponse();
268        completeDataAccess(pkt);
269    } else if (read) {
270        handleReadPacket(pkt);
271    } else {
272        bool do_access = true;  // flag to suppress cache access
273
274        if (req->isLLSC()) {
275            do_access = TheISA::handleLockedWrite(thread, req);
276        } else if (req->isCondSwap()) {
277            assert(res);
278            req->setExtraData(*res);
279        }
280
281        if (do_access) {
282            dcache_pkt = pkt;
283            handleWritePacket();
284        } else {
285            _status = DcacheWaitResponse;
286            completeDataAccess(pkt);
287        }
288    }
289}
290
// Issue an access that was split across a cache-line boundary: send
// the two fragment packets in order, clearing each from the parent's
// outstanding-fragment state once the port accepts it. NO_ACCESS
// requests complete immediately via a single response.
291void
292TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
293                               RequestPtr req, uint8_t *data, bool read)
294{
295    PacketPtr pkt1, pkt2;
296    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
297    if (req->getFlags().isSet(Request::NO_ACCESS)) {
298        assert(!dcache_pkt);
299        pkt1->makeResponse();
300        completeDataAccess(pkt1);
301    } else if (read) {
302        SplitFragmentSenderState * send_state =
303            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
304        if (handleReadPacket(pkt1)) {
305            send_state->clearFromParent();
306            send_state = dynamic_cast<SplitFragmentSenderState *>(
307                    pkt2->senderState);
308            if (handleReadPacket(pkt2)) {
309                send_state->clearFromParent();
310            }
311        }
312    } else {
313        dcache_pkt = pkt1;
314        SplitFragmentSenderState * send_state =
315            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
316        if (handleWritePacket()) {
317            send_state->clearFromParent();
318            dcache_pkt = pkt2;
319            send_state = dynamic_cast<SplitFragmentSenderState *>(
320                    pkt2->senderState);
321            if (handleWritePacket()) {
322                send_state->clearFromParent();
323            }
324        }
325    }
326}
327
// Common path for a fault reported during translation: account the
// elapsed cycles, discard any trace record for the faulting
// instruction, run post-execute bookkeeping, and advance through the
// fault.
328void
329TimingSimpleCPU::translationFault(Fault fault)
330{
331    // fault may be NoFault in cases where a fault is suppressed,
332    // for instance prefetches.
333    numCycles += curCycle() - previousCycle;
334    previousCycle = curCycle();
335
336    if (traceData) {
337        // Since there was a fault, we shouldn't trace this instruction.
338        delete traceData;
339        traceData = NULL;
340    }
341
342    postExecute();
343
344    advanceInst(fault);
345}
346
// Pick the memory command matching the request type (plain read/write,
// load-locked / store-conditional, or swap) and allocate the packet.
347void
348TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
349{
350    MemCmd cmd;
351    if (read) {
352        cmd = MemCmd::ReadReq;
353        if (req->isLLSC())
354            cmd = MemCmd::LoadLockedReq;
355    } else {
356        cmd = MemCmd::WriteReq;
357        if (req->isLLSC()) {
358            cmd = MemCmd::StoreCondReq;
359        } else if (req->isSwap()) {
360            cmd = MemCmd::SwapReq;
361        }
362    }
363    pkt = new Packet(req, cmd);
364}
365
// Build the two fragment packets for a split access plus the "main"
// packet that owns the data buffer and collects the overall response.
// The fragments reference static slices of the main buffer, and
// sender-state objects link each fragment back to the main packet.
366void
367TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
368        RequestPtr req1, RequestPtr req2, RequestPtr req,
369        uint8_t *data, bool read)
370{
371    pkt1 = pkt2 = NULL;
372
373    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
374
375    if (req->getFlags().isSet(Request::NO_ACCESS)) {
376        buildPacket(pkt1, req, read);
377        return;
378    }
379
380    buildPacket(pkt1, req1, read);
381    buildPacket(pkt2, req2, read);
382
383    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
384    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
385
386    pkt->dataDynamicArray<uint8_t>(data);
387    pkt1->dataStatic<uint8_t>(data);
388    pkt2->dataStatic<uint8_t>(data + req1->getSize());
389
390    SplitMainSenderState * main_send_state = new SplitMainSenderState;
391    pkt->senderState = main_send_state;
392    main_send_state->fragments[0] = pkt1;
393    main_send_state->fragments[1] = pkt2;
394    main_send_state->outstanding = 2;
395    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
396    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
397}
398
// Begin a timing read of 'size' bytes at virtual address 'addr'.
// Builds the request, splits it when it crosses a cache-line boundary,
// and launches the timing TLB translation(s); the access itself is
// issued later from finishTranslation(). Always returns NoFault here;
// real faults are delivered through the translation callback.
399Fault
400TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
401                         unsigned size, unsigned flags)
402{
403    Fault fault;
404    const int asid = 0;
405    const ThreadID tid = 0;
406    const Addr pc = thread->instAddr();
407    unsigned block_size = dcachePort.peerBlockSize();
408    BaseTLB::Mode mode = BaseTLB::Read;
409
410    if (traceData) {
411        traceData->setAddr(addr);
412    }
413
414    RequestPtr req = new Request(asid, addr, size,
415                                 flags, dataMasterId(), pc, _cpuId, tid);
416
417    Addr split_addr = roundDown(addr + size - 1, block_size);
418    assert(split_addr <= addr || split_addr - addr < block_size);
419
420    _status = DTBWaitResponse;
421    if (split_addr > addr) {
422        RequestPtr req1, req2;
423        assert(!req->isLLSC() && !req->isSwap());
424        req->splitOnVaddr(split_addr, req1, req2);
425
426        WholeTranslationState *state =
427            new WholeTranslationState(req, req1, req2, new uint8_t[size],
428                                      NULL, mode);
429        DataTranslation<TimingSimpleCPU *> *trans1 =
430            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
431        DataTranslation<TimingSimpleCPU *> *trans2 =
432            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
433
434        thread->dtb->translateTiming(req1, tc, trans1, mode);
435        thread->dtb->translateTiming(req2, tc, trans2, mode);
436    } else {
437        WholeTranslationState *state =
438            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
439        DataTranslation<TimingSimpleCPU *> *translation
440            = new DataTranslation<TimingSimpleCPU *>(this, state);
441        thread->dtb->translateTiming(req, tc, translation, mode);
442    }
443
444    return NoFault;
445}
446
// Try to issue the pending write held in dcache_pkt. IPR writes are
// handled locally via a delayed IprEvent; otherwise the packet goes to
// the dcache, staying in dcache_pkt for a retry if the port is busy.
// Returns true once the packet has been handed off.
447bool
448TimingSimpleCPU::handleWritePacket()
449{
450    RequestPtr req = dcache_pkt->req;
451    if (req->isMmappedIpr()) {
452        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
453        new IprEvent(dcache_pkt, this, clockEdge(delay));
454        _status = DcacheWaitResponse;
455        dcache_pkt = NULL;
456    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
457        _status = DcacheRetry;
458    } else {
459        _status = DcacheWaitResponse;
460        // memory system takes ownership of packet
461        dcache_pkt = NULL;
462    }
463    return dcache_pkt == NULL;
464}
465
// Begin a timing write: copy the caller's data (the source buffer may
// not outlive this call), build the request, split it on a cache-line
// crossing, and launch the timing TLB translation(s). The write is
// issued from finishTranslation(); translation faults are returned
// through that path, so this always returns NoFault.
466Fault
467TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
468                          Addr addr, unsigned flags, uint64_t *res)
469{
470    uint8_t *newData = new uint8_t[size];
471    memcpy(newData, data, size);
472
473    const int asid = 0;
474    const ThreadID tid = 0;
475    const Addr pc = thread->instAddr();
476    unsigned block_size = dcachePort.peerBlockSize();
477    BaseTLB::Mode mode = BaseTLB::Write;
478
479    if (traceData) {
480        traceData->setAddr(addr);
481    }
482
483    RequestPtr req = new Request(asid, addr, size,
484                                 flags, dataMasterId(), pc, _cpuId, tid);
485
486    Addr split_addr = roundDown(addr + size - 1, block_size);
487    assert(split_addr <= addr || split_addr - addr < block_size);
488
489    _status = DTBWaitResponse;
490    if (split_addr > addr) {
491        RequestPtr req1, req2;
492        assert(!req->isLLSC() && !req->isSwap());
493        req->splitOnVaddr(split_addr, req1, req2);
494
495        WholeTranslationState *state =
496            new WholeTranslationState(req, req1, req2, newData, res, mode);
497        DataTranslation<TimingSimpleCPU *> *trans1 =
498            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
499        DataTranslation<TimingSimpleCPU *> *trans2 =
500            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
501
502        thread->dtb->translateTiming(req1, tc, trans1, mode);
503        thread->dtb->translateTiming(req2, tc, trans2, mode);
504    } else {
505        WholeTranslationState *state =
506            new WholeTranslationState(req, newData, res, mode);
507        DataTranslation<TimingSimpleCPU *> *translation =
508            new DataTranslation<TimingSimpleCPU *>(this, state);
509        thread->dtb->translateTiming(req, tc, translation, mode);
510    }
511
512    // Translation faults will be returned via finishTranslation()
513    return NoFault;
514}
515
516
// Callback invoked when all pieces of a data translation are done: on
// a fault, free the data buffer and requests and take the fault path
// (prefetch faults are squashed to NoFault first); otherwise issue the
// access, split or not. The translation state is consumed either way.
517void
518TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
519{
520    _status = BaseSimpleCPU::Running;
521
522    if (state->getFault() != NoFault) {
523        if (state->isPrefetch()) {
524            state->setNoFault();
525        }
526        delete [] state->data;
527        state->deleteReqs();
528        translationFault(state->getFault());
529    } else {
530        if (!state->isSplit) {
531            sendData(state->mainReq, state->data, state->res,
532                     state->mode == BaseTLB::Read);
533        } else {
534            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
535                          state->data, state->mode == BaseTLB::Read);
536        }
537    }
538
539    delete state;
540}
541
542
// Fetch stage: poll for interrupts (unless in a delayed-commit
// sequence) and PC events, then either start a timing itb translation
// for a fresh instruction fetch, or -- when executing from the
// microcode ROM or inside a macro-op -- skip the memory access and go
// straight to completeIfetch(NULL).
543void
544TimingSimpleCPU::fetch()
545{
546    DPRINTF(SimpleCPU, "Fetch\n");
547
548    if (!curStaticInst || !curStaticInst->isDelayedCommit())
549        checkForInterrupts();
550
551    checkPcEventQueue();
552
553    // We must have just got suspended by a PC event
554    if (_status == Idle)
555        return;
556
557    TheISA::PCState pcState = thread->pcState();
558    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
559
560    if (needToFetch) {
561        _status = BaseSimpleCPU::Running;
562        Request *ifetch_req = new Request();
563        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
564        setupFetchRequest(ifetch_req);
565        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
566        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
567                                     BaseTLB::Execute);
568    } else {
569        _status = IcacheWaitResponse;
570        completeIfetch(NULL);
571
572        numCycles += curCycle() - previousCycle;
573        previousCycle = curCycle();
574    }
575}
576
577
// Fetch-translation callback: on success build and send the ifetch
// packet (waiting for a retry if the icache port is busy); on a
// translation fault drop the request and advance directly into the
// fault handler. Cycle accounting is updated in both cases.
578void
579TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
580{
581    if (fault == NoFault) {
582        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
583                req->getVaddr(), req->getPaddr());
584        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
585        ifetch_pkt->dataStatic(&inst);
586        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
587
588        if (!icachePort.sendTimingReq(ifetch_pkt)) {
589            // Need to wait for retry
590            _status = IcacheRetry;
591        } else {
592            // Need to wait for cache to respond
593            _status = IcacheWaitResponse;
594            // ownership of packet transferred to memory system
595            ifetch_pkt = NULL;
596        }
597    } else {
598        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
599        delete req;
600        // fetch fault: advance directly to next instruction (fault handler)
601        _status = BaseSimpleCPU::Running;
602        advanceInst(fault);
603    }
604
605    numCycles += curCycle() - previousCycle;
606    previousCycle = curCycle();
607}
608
609
610void
611TimingSimpleCPU::advanceInst(Fault fault)
612{
613 if (_status == Faulting)
614 return;
615
616 if (fault != NoFault) {
617 advancePC(fault);
618 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
124
125 return 1;
126 }
127}
128
// Resume after a drain completes: sanity-check state, ensure the
// memory system is in timing mode, and restart fetch on the next cycle
// if the (single supported) thread is active; otherwise go Idle.
129void
130TimingSimpleCPU::drainResume()
131{
132    assert(!fetchEvent.scheduled());
133    assert(!drainManager);
134    if (switchedOut())
135        return;
136
137    DPRINTF(SimpleCPU, "Resume\n");
138    verifyMemoryMode();
139
140    assert(!threadContexts.empty());
141    if (threadContexts.size() > 1)
142        fatal("The timing CPU only supports one thread.\n");
143
144    if (thread->status() == ThreadContext::Active) {
145        schedule(fetchEvent, nextCycle());
146        _status = BaseSimpleCPU::Running;
147    } else {
148        _status = BaseSimpleCPU::Idle;
149    }
150}
151
// If a drain is pending and the CPU has reached a drained state
// (clean instruction boundary), notify the drain manager and clear the
// pending request. Returns true when the drain completed here.
152bool
153TimingSimpleCPU::tryCompleteDrain()
154{
155    if (!drainManager)
156        return false;
157
158    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
159    if (!isDrained())
160        return false;
161
162    DPRINTF(Drain, "CPU done draining, processing drain event\n");
163    drainManager->signalDrainDone();
164    drainManager = NULL;
165
166    return true;
167}
168
// Switch this CPU out: delegate to the base class, assert we are at a
// clean (macro-)instruction boundary, and fold the cycles elapsed
// since the last accounting point into numCycles.
169void
170TimingSimpleCPU::switchOut()
171{
172    BaseSimpleCPU::switchOut();
173
174    assert(!fetchEvent.scheduled());
175    assert(_status == BaseSimpleCPU::Running || _status == Idle);
176    assert(!stayAtPC);
177    assert(microPC() == 0);
178
179    numCycles += curCycle() - previousCycle;
180}
181
182
// Inherit state from the CPU being replaced and reset the
// cycle-accounting baseline to the current cycle.
183void
184TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
185{
186    BaseSimpleCPU::takeOverFrom(oldCPU);
187
188    previousCycle = curCycle();
189}
190
// This CPU model only works when the memory system is in timing mode;
// abort the simulation otherwise.
191void
192TimingSimpleCPU::verifyMemoryMode() const
193{
194    if (!system->isTimingMode()) {
195        fatal("The timing CPU requires the memory system to be in "
196              "'timing' mode.\n");
197    }
198}
199
// Wake up thread 0: transition Idle -> Running and schedule the first
// fetch 'delay' cycles from now.
200void
201TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
202{
203    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
204
205    assert(thread_num == 0);
206    assert(thread);
207
208    assert(_status == Idle);
209
210    notIdleFraction++;
211    _status = BaseSimpleCPU::Running;
212
213    // kick things off by initiating the fetch of the next instruction
214    schedule(fetchEvent, clockEdge(delay));
215}
216
217
// Suspend thread 0: drop to Idle so completeInst() stops initiating
// further fetches. No-op when already Idle.
218void
219TimingSimpleCPU::suspendContext(ThreadID thread_num)
220{
221    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
222
223    assert(thread_num == 0);
224    assert(thread);
225
226    if (_status == Idle)
227        return;
228
229    assert(_status == BaseSimpleCPU::Running);
230
231    // just change status to Idle... if status != Running,
232    // completeInst() will not initiate fetch of next instruction.
233
234    notIdleFraction--;
235    _status = Idle;
236}
237
// Try to issue a timing read packet. Memory-mapped IPR reads are
// serviced locally through a delayed IprEvent; otherwise the packet is
// sent to the dcache, and kept in dcache_pkt for a retry if the port
// is busy. Returns true once the packet has been handed off.
238bool
239TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
240{
241    RequestPtr req = pkt->req;
242    if (req->isMmappedIpr()) {
243        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
244        new IprEvent(pkt, this, clockEdge(delay));
245        _status = DcacheWaitResponse;
246        dcache_pkt = NULL;
247    } else if (!dcachePort.sendTimingReq(pkt)) {
248        _status = DcacheRetry;
249        dcache_pkt = pkt;
250    } else {
251        _status = DcacheWaitResponse;
252        // memory system takes ownership of packet
253        dcache_pkt = NULL;
254    }
255    return dcache_pkt == NULL;
256}
257
// Issue a (non-split) data access once translation has finished.
// NO_ACCESS requests are completed immediately without touching
// memory; reads go through handleReadPacket(); writes may be
// suppressed by a failing store-conditional (handleLockedWrite), in
// which case the access completes locally instead of going to cache.
258void
259TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
260                          bool read)
261{
262    PacketPtr pkt;
263    buildPacket(pkt, req, read);
264    pkt->dataDynamicArray<uint8_t>(data);
265    if (req->getFlags().isSet(Request::NO_ACCESS)) {
266        assert(!dcache_pkt);
267        pkt->makeResponse();
268        completeDataAccess(pkt);
269    } else if (read) {
270        handleReadPacket(pkt);
271    } else {
272        bool do_access = true;  // flag to suppress cache access
273
274        if (req->isLLSC()) {
275            do_access = TheISA::handleLockedWrite(thread, req);
276        } else if (req->isCondSwap()) {
277            assert(res);
278            req->setExtraData(*res);
279        }
280
281        if (do_access) {
282            dcache_pkt = pkt;
283            handleWritePacket();
284        } else {
285            _status = DcacheWaitResponse;
286            completeDataAccess(pkt);
287        }
288    }
289}
290
// Issue an access that was split across a cache-line boundary: send
// the two fragment packets in order, clearing each from the parent's
// outstanding-fragment state once the port accepts it. NO_ACCESS
// requests complete immediately via a single response.
291void
292TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
293                               RequestPtr req, uint8_t *data, bool read)
294{
295    PacketPtr pkt1, pkt2;
296    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
297    if (req->getFlags().isSet(Request::NO_ACCESS)) {
298        assert(!dcache_pkt);
299        pkt1->makeResponse();
300        completeDataAccess(pkt1);
301    } else if (read) {
302        SplitFragmentSenderState * send_state =
303            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
304        if (handleReadPacket(pkt1)) {
305            send_state->clearFromParent();
306            send_state = dynamic_cast<SplitFragmentSenderState *>(
307                    pkt2->senderState);
308            if (handleReadPacket(pkt2)) {
309                send_state->clearFromParent();
310            }
311        }
312    } else {
313        dcache_pkt = pkt1;
314        SplitFragmentSenderState * send_state =
315            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
316        if (handleWritePacket()) {
317            send_state->clearFromParent();
318            dcache_pkt = pkt2;
319            send_state = dynamic_cast<SplitFragmentSenderState *>(
320                    pkt2->senderState);
321            if (handleWritePacket()) {
322                send_state->clearFromParent();
323            }
324        }
325    }
326}
327
// Common path for a fault reported during translation: account the
// elapsed cycles, discard any trace record for the faulting
// instruction, run post-execute bookkeeping, and advance through the
// fault.
328void
329TimingSimpleCPU::translationFault(Fault fault)
330{
331    // fault may be NoFault in cases where a fault is suppressed,
332    // for instance prefetches.
333    numCycles += curCycle() - previousCycle;
334    previousCycle = curCycle();
335
336    if (traceData) {
337        // Since there was a fault, we shouldn't trace this instruction.
338        delete traceData;
339        traceData = NULL;
340    }
341
342    postExecute();
343
344    advanceInst(fault);
345}
346
// Pick the memory command matching the request type (plain read/write,
// load-locked / store-conditional, or swap) and allocate the packet.
347void
348TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
349{
350    MemCmd cmd;
351    if (read) {
352        cmd = MemCmd::ReadReq;
353        if (req->isLLSC())
354            cmd = MemCmd::LoadLockedReq;
355    } else {
356        cmd = MemCmd::WriteReq;
357        if (req->isLLSC()) {
358            cmd = MemCmd::StoreCondReq;
359        } else if (req->isSwap()) {
360            cmd = MemCmd::SwapReq;
361        }
362    }
363    pkt = new Packet(req, cmd);
364}
365
// Build the two fragment packets for a split access plus the "main"
// packet that owns the data buffer and collects the overall response.
// The fragments reference static slices of the main buffer, and
// sender-state objects link each fragment back to the main packet.
366void
367TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
368        RequestPtr req1, RequestPtr req2, RequestPtr req,
369        uint8_t *data, bool read)
370{
371    pkt1 = pkt2 = NULL;
372
373    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
374
375    if (req->getFlags().isSet(Request::NO_ACCESS)) {
376        buildPacket(pkt1, req, read);
377        return;
378    }
379
380    buildPacket(pkt1, req1, read);
381    buildPacket(pkt2, req2, read);
382
383    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
384    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
385
386    pkt->dataDynamicArray<uint8_t>(data);
387    pkt1->dataStatic<uint8_t>(data);
388    pkt2->dataStatic<uint8_t>(data + req1->getSize());
389
390    SplitMainSenderState * main_send_state = new SplitMainSenderState;
391    pkt->senderState = main_send_state;
392    main_send_state->fragments[0] = pkt1;
393    main_send_state->fragments[1] = pkt2;
394    main_send_state->outstanding = 2;
395    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
396    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
397}
398
// Begin a timing read of 'size' bytes at virtual address 'addr'.
// Builds the request, splits it when it crosses a cache-line boundary,
// and launches the timing TLB translation(s); the access itself is
// issued later from finishTranslation(). Always returns NoFault here;
// real faults are delivered through the translation callback.
399Fault
400TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
401                         unsigned size, unsigned flags)
402{
403    Fault fault;
404    const int asid = 0;
405    const ThreadID tid = 0;
406    const Addr pc = thread->instAddr();
407    unsigned block_size = dcachePort.peerBlockSize();
408    BaseTLB::Mode mode = BaseTLB::Read;
409
410    if (traceData) {
411        traceData->setAddr(addr);
412    }
413
414    RequestPtr req = new Request(asid, addr, size,
415                                 flags, dataMasterId(), pc, _cpuId, tid);
416
417    Addr split_addr = roundDown(addr + size - 1, block_size);
418    assert(split_addr <= addr || split_addr - addr < block_size);
419
420    _status = DTBWaitResponse;
421    if (split_addr > addr) {
422        RequestPtr req1, req2;
423        assert(!req->isLLSC() && !req->isSwap());
424        req->splitOnVaddr(split_addr, req1, req2);
425
426        WholeTranslationState *state =
427            new WholeTranslationState(req, req1, req2, new uint8_t[size],
428                                      NULL, mode);
429        DataTranslation<TimingSimpleCPU *> *trans1 =
430            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
431        DataTranslation<TimingSimpleCPU *> *trans2 =
432            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
433
434        thread->dtb->translateTiming(req1, tc, trans1, mode);
435        thread->dtb->translateTiming(req2, tc, trans2, mode);
436    } else {
437        WholeTranslationState *state =
438            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
439        DataTranslation<TimingSimpleCPU *> *translation
440            = new DataTranslation<TimingSimpleCPU *>(this, state);
441        thread->dtb->translateTiming(req, tc, translation, mode);
442    }
443
444    return NoFault;
445}
446
// Try to issue the pending write held in dcache_pkt. IPR writes are
// handled locally via a delayed IprEvent; otherwise the packet goes to
// the dcache, staying in dcache_pkt for a retry if the port is busy.
// Returns true once the packet has been handed off.
447bool
448TimingSimpleCPU::handleWritePacket()
449{
450    RequestPtr req = dcache_pkt->req;
451    if (req->isMmappedIpr()) {
452        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
453        new IprEvent(dcache_pkt, this, clockEdge(delay));
454        _status = DcacheWaitResponse;
455        dcache_pkt = NULL;
456    } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
457        _status = DcacheRetry;
458    } else {
459        _status = DcacheWaitResponse;
460        // memory system takes ownership of packet
461        dcache_pkt = NULL;
462    }
463    return dcache_pkt == NULL;
464}
465
// Begin a timing write: copy the caller's data (the source buffer may
// not outlive this call), build the request, split it on a cache-line
// crossing, and launch the timing TLB translation(s). The write is
// issued from finishTranslation(); translation faults are returned
// through that path, so this always returns NoFault.
466Fault
467TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
468                          Addr addr, unsigned flags, uint64_t *res)
469{
470    uint8_t *newData = new uint8_t[size];
471    memcpy(newData, data, size);
472
473    const int asid = 0;
474    const ThreadID tid = 0;
475    const Addr pc = thread->instAddr();
476    unsigned block_size = dcachePort.peerBlockSize();
477    BaseTLB::Mode mode = BaseTLB::Write;
478
479    if (traceData) {
480        traceData->setAddr(addr);
481    }
482
483    RequestPtr req = new Request(asid, addr, size,
484                                 flags, dataMasterId(), pc, _cpuId, tid);
485
486    Addr split_addr = roundDown(addr + size - 1, block_size);
487    assert(split_addr <= addr || split_addr - addr < block_size);
488
489    _status = DTBWaitResponse;
490    if (split_addr > addr) {
491        RequestPtr req1, req2;
492        assert(!req->isLLSC() && !req->isSwap());
493        req->splitOnVaddr(split_addr, req1, req2);
494
495        WholeTranslationState *state =
496            new WholeTranslationState(req, req1, req2, newData, res, mode);
497        DataTranslation<TimingSimpleCPU *> *trans1 =
498            new DataTranslation<TimingSimpleCPU *>(this, state, 0);
499        DataTranslation<TimingSimpleCPU *> *trans2 =
500            new DataTranslation<TimingSimpleCPU *>(this, state, 1);
501
502        thread->dtb->translateTiming(req1, tc, trans1, mode);
503        thread->dtb->translateTiming(req2, tc, trans2, mode);
504    } else {
505        WholeTranslationState *state =
506            new WholeTranslationState(req, newData, res, mode);
507        DataTranslation<TimingSimpleCPU *> *translation =
508            new DataTranslation<TimingSimpleCPU *>(this, state);
509        thread->dtb->translateTiming(req, tc, translation, mode);
510    }
511
512    // Translation faults will be returned via finishTranslation()
513    return NoFault;
514}
515
516
517void
518TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
519{
    // Callback from the DTB once all fragments of a data access have
    // translated: either report the fault or send the access(es) on to
    // the D-cache.  Always consumes 'state'.
520    _status = BaseSimpleCPU::Running;
521
522    if (state->getFault() != NoFault) {
        // Faulting prefetches are squashed rather than reported.
523        if (state->isPrefetch()) {
524            state->setNoFault();
525        }
526        delete [] state->data;
527        state->deleteReqs();
528        translationFault(state->getFault());
529    } else {
530        if (!state->isSplit) {
531            sendData(state->mainReq, state->data, state->res,
532                     state->mode == BaseTLB::Read);
533        } else {
534            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
535                          state->data, state->mode == BaseTLB::Read);
536        }
537    }
538
539    delete state;
540}
541
542
543void
544TimingSimpleCPU::fetch()
545{
    // Start fetching the next instruction: check interrupts/PC events,
    // then either start a timing translation of the fetch address or,
    // for microcode-ROM / macro-op execution, proceed straight to
    // completeIfetch(NULL).
546    DPRINTF(SimpleCPU, "Fetch\n");
547
548    if (!curStaticInst || !curStaticInst->isDelayedCommit())
549        checkForInterrupts();
550
551    checkPcEventQueue();
552
553    // We must have just got suspended by a PC event
554    if (_status == Idle)
555        return;
556
557    TheISA::PCState pcState = thread->pcState();
    // No memory fetch is needed while executing from the microcode ROM
    // or while inside a macro-op.
558    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
559
560    if (needToFetch) {
561        _status = BaseSimpleCPU::Running;
562        Request *ifetch_req = new Request();
563        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
564        setupFetchRequest(ifetch_req);
565        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
566        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
567                                     BaseTLB::Execute);
568    } else {
569        _status = IcacheWaitResponse;
570        completeIfetch(NULL);
571
572        numCycles += curCycle() - previousCycle;
573        previousCycle = curCycle();
574    }
575}
576
577
578void
579TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
580{
    // Called when the fetch-address translation finishes: on success
    // send the fetch packet to the I-cache; on a fault go straight to
    // the fault handler via advanceInst().
581    if (fault == NoFault) {
582        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
583                req->getVaddr(), req->getPaddr());
584        ifetch_pkt = new Packet(req, MemCmd::ReadReq);
        // 'inst' is a member buffer, so the packet data is static.
585        ifetch_pkt->dataStatic(&inst);
586        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
587
588        if (!icachePort.sendTimingReq(ifetch_pkt)) {
589            // Need to wait for retry
590            _status = IcacheRetry;
591        } else {
592            // Need to wait for cache to respond
593            _status = IcacheWaitResponse;
594            // ownership of packet transferred to memory system
595            ifetch_pkt = NULL;
596        }
597    } else {
598        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
599        delete req;
600        // fetch fault: advance directly to next instruction (fault handler)
601        _status = BaseSimpleCPU::Running;
602        advanceInst(fault);
603    }
604
605    numCycles += curCycle() - previousCycle;
606    previousCycle = curCycle();
607}
608
609
610void
611TimingSimpleCPU::advanceInst(Fault fault)
612{
    // Move on past the current instruction (or fault): advance the PC,
    // honour a pending drain, and kick off the next fetch if running.
613    if (_status == Faulting)
614        return;
615
616    if (fault != NoFault) {
617        advancePC(fault);
618        DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
        // Re-fetch at the next clock edge.  The stale pre-rename
        // duplicate of this statement (nextCycle()) from the older
        // revision has been dropped; clockEdge() is the current API.
619        reschedule(fetchEvent, clockEdge(), true);
620        _status = Faulting;
621        return;
622    }
623
624
625    if (!stayAtPC)
626        advancePC(fault);
627
628    if (tryCompleteDrain())
629        return;
630
631    if (_status == BaseSimpleCPU::Running) {
632        // kick off fetch of next instruction... callback from icache
633        // response will cause that instruction to be executed,
634        // keeping the CPU running.
635        fetch();
636    }
637}
638
639
640void
641TimingSimpleCPU::completeIfetch(PacketPtr pkt)
642{
    // Execute the instruction delivered by the I-cache.  'pkt' may be
    // NULL when executing from the microcode ROM / inside a macro-op
    // (see fetch()).
643    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
644            pkt->getAddr() : 0);
645
646    // received a response from the icache: execute the received
647    // instruction
648
649    assert(!pkt || !pkt->isError());
650    assert(_status == IcacheWaitResponse);
651
652    _status = BaseSimpleCPU::Running;
653
654    numCycles += curCycle() - previousCycle;
655    previousCycle = curCycle();
656
657    preExecute();
658    if (curStaticInst && curStaticInst->isMemRef()) {
659        // load or store: just send to dcache
660        Fault fault = curStaticInst->initiateAcc(this, traceData);
661
662        // If we're not running now the instruction will complete in a dcache
663        // response callback or the instruction faulted and has started an
664        // ifetch
665        if (_status == BaseSimpleCPU::Running) {
666            if (fault != NoFault && traceData) {
667                // If there was a fault, we shouldn't trace this instruction.
668                delete traceData;
669                traceData = NULL;
670            }
671
672            postExecute();
673            // @todo remove me after debugging with legion done
674            if (curStaticInst && (!curStaticInst->isMicroop() ||
675                        curStaticInst->isFirstMicroop()))
676                instCnt++;
677            advanceInst(fault);
678        }
679    } else if (curStaticInst) {
680        // non-memory instruction: execute completely now
681        Fault fault = curStaticInst->execute(this, traceData);
682
683        // keep an instruction count
684        if (fault == NoFault)
685            countInst();
686        else if (traceData && !DTRACE(ExecFaulting)) {
687            delete traceData;
688            traceData = NULL;
689        }
690
691        postExecute();
692        // @todo remove me after debugging with legion done
693        if (curStaticInst && (!curStaticInst->isMicroop() ||
694                    curStaticInst->isFirstMicroop()))
695            instCnt++;
696        advanceInst(fault);
697    } else {
        // Nothing decoded yet: just move on.
698        advanceInst(NoFault);
699    }
700
    // The CPU owns the fetch packet and its request once handled.
701    if (pkt) {
702        delete pkt->req;
703        delete pkt;
704    }
705}
706
707void
708TimingSimpleCPU::IcachePort::ITickEvent::process()
709{
    // Deferred I-cache response: hand the packet to the CPU now.
710    cpu->completeIfetch(pkt);
711}
712
713bool
714TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
715{
716 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
717 // delay processing of returned data until next CPU clock edge
620 _status = Faulting;
621 return;
622 }
623
624
625 if (!stayAtPC)
626 advancePC(fault);
627
628 if (tryCompleteDrain())
629 return;
630
631 if (_status == BaseSimpleCPU::Running) {
632 // kick off fetch of next instruction... callback from icache
633 // response will cause that instruction to be executed,
634 // keeping the CPU running.
635 fetch();
636 }
637}
638
639
640void
641TimingSimpleCPU::completeIfetch(PacketPtr pkt)
642{
    // Execute the instruction delivered by the I-cache.  'pkt' may be
    // NULL when executing from the microcode ROM / inside a macro-op
    // (see fetch()).
643    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
644            pkt->getAddr() : 0);
645
646    // received a response from the icache: execute the received
647    // instruction
648
649    assert(!pkt || !pkt->isError());
650    assert(_status == IcacheWaitResponse);
651
652    _status = BaseSimpleCPU::Running;
653
654    numCycles += curCycle() - previousCycle;
655    previousCycle = curCycle();
656
657    preExecute();
658    if (curStaticInst && curStaticInst->isMemRef()) {
659        // load or store: just send to dcache
660        Fault fault = curStaticInst->initiateAcc(this, traceData);
661
662        // If we're not running now the instruction will complete in a dcache
663        // response callback or the instruction faulted and has started an
664        // ifetch
665        if (_status == BaseSimpleCPU::Running) {
666            if (fault != NoFault && traceData) {
667                // If there was a fault, we shouldn't trace this instruction.
668                delete traceData;
669                traceData = NULL;
670            }
671
672            postExecute();
673            // @todo remove me after debugging with legion done
674            if (curStaticInst && (!curStaticInst->isMicroop() ||
675                        curStaticInst->isFirstMicroop()))
676                instCnt++;
677            advanceInst(fault);
678        }
679    } else if (curStaticInst) {
680        // non-memory instruction: execute completely now
681        Fault fault = curStaticInst->execute(this, traceData);
682
683        // keep an instruction count
684        if (fault == NoFault)
685            countInst();
686        else if (traceData && !DTRACE(ExecFaulting)) {
687            delete traceData;
688            traceData = NULL;
689        }
690
691        postExecute();
692        // @todo remove me after debugging with legion done
693        if (curStaticInst && (!curStaticInst->isMicroop() ||
694                    curStaticInst->isFirstMicroop()))
695            instCnt++;
696        advanceInst(fault);
697    } else {
        // Nothing decoded yet: just move on.
698        advanceInst(NoFault);
699    }
700
    // The CPU owns the fetch packet and its request once handled.
701    if (pkt) {
702        delete pkt->req;
703        delete pkt;
704    }
705}
706
707void
708TimingSimpleCPU::IcachePort::ITickEvent::process()
709{
    // Deferred I-cache response: hand the packet to the CPU now.
710    cpu->completeIfetch(pkt);
711}
712
713bool
714TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
715{
    // I-cache response: process immediately if we are already on a CPU
    // clock edge, otherwise defer via tickEvent.  The stale duplicate
    // assignment using the old nextCycle() API has been removed in
    // favour of the renamed clockEdge().
716    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
717    // delay processing of returned data until next CPU clock edge
718    Tick next_tick = cpu->clockEdge();
719
720    if (next_tick == curTick())
721        cpu->completeIfetch(pkt);
722    else
723        tickEvent.schedule(pkt, next_tick);
724
    // Always accept the response; any delay is absorbed by tickEvent.
725    return true;
726}
727
728void
729TimingSimpleCPU::IcachePort::recvRetry()
730{
731    // we shouldn't get a retry unless we have a packet that we're
732    // waiting to transmit
733    assert(cpu->ifetch_pkt != NULL);
734    assert(cpu->_status == IcacheRetry);
735    PacketPtr tmp = cpu->ifetch_pkt;
736    if (sendTimingReq(tmp)) {
        // Accepted this time; ownership moves to the memory system.
737        cpu->_status = IcacheWaitResponse;
738        cpu->ifetch_pkt = NULL;
739    }
740}
741
742void
743TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
744{
    // D-cache response: for split accesses wait until both fragments
    // have returned, then complete the memory instruction, count it,
    // and advance to the next instruction.
745    // received a response from the dcache: complete the load or store
746    // instruction
747    assert(!pkt->isError());
748    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
749           pkt->req->getFlags().isSet(Request::NO_ACCESS));
750
751    numCycles += curCycle() - previousCycle;
752    previousCycle = curCycle();
753
754    if (pkt->senderState) {
        // One fragment of a split access: free it and only fall
        // through to the big packet once both fragments are in.
755        SplitFragmentSenderState * send_state =
756            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
757        assert(send_state);
758        delete pkt->req;
759        delete pkt;
760        PacketPtr big_pkt = send_state->bigPkt;
761        delete send_state;
762
763        SplitMainSenderState * main_send_state =
764            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
765        assert(main_send_state);
766        // Record the fact that this packet is no longer outstanding.
767        assert(main_send_state->outstanding != 0);
768        main_send_state->outstanding--;
769
770        if (main_send_state->outstanding) {
771            return;
772        } else {
773            delete main_send_state;
774            big_pkt->senderState = NULL;
775            pkt = big_pkt;
776        }
777    }
778
779    _status = BaseSimpleCPU::Running;
780
781    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
782
783    // keep an instruction count
784    if (fault == NoFault)
785        countInst();
786    else if (traceData) {
787        // If there was a fault, we shouldn't trace this instruction.
788        delete traceData;
789        traceData = NULL;
790    }
791
792    // the locked flag may be cleared on the response packet, so check
793    // pkt->req and not pkt to see if it was a load-locked
794    if (pkt->isRead() && pkt->req->isLLSC()) {
795        TheISA::handleLockedRead(thread, pkt->req);
796    }
797
798    delete pkt->req;
799    delete pkt;
800
801    postExecute();
802
803    advanceInst(fault);
804}
805
806bool
807TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
808{
809 // delay processing of returned data until next CPU clock edge
719
720 if (next_tick == curTick())
721 cpu->completeIfetch(pkt);
722 else
723 tickEvent.schedule(pkt, next_tick);
724
725 return true;
726}
727
728void
729TimingSimpleCPU::IcachePort::recvRetry()
730{
731    // we shouldn't get a retry unless we have a packet that we're
732    // waiting to transmit
733    assert(cpu->ifetch_pkt != NULL);
734    assert(cpu->_status == IcacheRetry);
735    PacketPtr tmp = cpu->ifetch_pkt;
736    if (sendTimingReq(tmp)) {
        // Accepted this time; ownership moves to the memory system.
737        cpu->_status = IcacheWaitResponse;
738        cpu->ifetch_pkt = NULL;
739    }
740}
741
742void
743TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
744{
    // D-cache response: for split accesses wait until both fragments
    // have returned, then complete the memory instruction, count it,
    // and advance to the next instruction.
745    // received a response from the dcache: complete the load or store
746    // instruction
747    assert(!pkt->isError());
748    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
749           pkt->req->getFlags().isSet(Request::NO_ACCESS));
750
751    numCycles += curCycle() - previousCycle;
752    previousCycle = curCycle();
753
754    if (pkt->senderState) {
        // One fragment of a split access: free it and only fall
        // through to the big packet once both fragments are in.
755        SplitFragmentSenderState * send_state =
756            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
757        assert(send_state);
758        delete pkt->req;
759        delete pkt;
760        PacketPtr big_pkt = send_state->bigPkt;
761        delete send_state;
762
763        SplitMainSenderState * main_send_state =
764            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
765        assert(main_send_state);
766        // Record the fact that this packet is no longer outstanding.
767        assert(main_send_state->outstanding != 0);
768        main_send_state->outstanding--;
769
770        if (main_send_state->outstanding) {
771            return;
772        } else {
773            delete main_send_state;
774            big_pkt->senderState = NULL;
775            pkt = big_pkt;
776        }
777    }
778
779    _status = BaseSimpleCPU::Running;
780
781    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
782
783    // keep an instruction count
784    if (fault == NoFault)
785        countInst();
786    else if (traceData) {
787        // If there was a fault, we shouldn't trace this instruction.
788        delete traceData;
789        traceData = NULL;
790    }
791
792    // the locked flag may be cleared on the response packet, so check
793    // pkt->req and not pkt to see if it was a load-locked
794    if (pkt->isRead() && pkt->req->isLLSC()) {
795        TheISA::handleLockedRead(thread, pkt->req);
796    }
797
798    delete pkt->req;
799    delete pkt;
800
801    postExecute();
802
803    advanceInst(fault);
804}
805
806bool
807TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
808{
    // D-cache response: process on the current tick if it is already a
    // CPU clock edge, otherwise defer via tickEvent.  The stale
    // duplicate assignment using the old nextCycle() API has been
    // removed in favour of the renamed clockEdge().
809    // delay processing of returned data until next CPU clock edge
810    Tick next_tick = cpu->clockEdge();
811
812    if (next_tick == curTick()) {
813        cpu->completeDataAccess(pkt);
814    } else {
815        if (!tickEvent.scheduled()) {
816            tickEvent.schedule(pkt, next_tick);
817        } else {
818            // In the case of a split transaction and a cache that is
819            // faster than a CPU we could get two responses before
820            // next_tick expires
821            if (!retryEvent.scheduled())
822                cpu->schedule(retryEvent, next_tick);
            // Reject this response for now; the cache will retry it.
823            return false;
824        }
825    }
826
827    return true;
828}
829
830void
831TimingSimpleCPU::DcachePort::DTickEvent::process()
832{
    // Deferred D-cache response: hand the packet to the CPU now.
833    cpu->completeDataAccess(pkt);
834}
835
836void
837TimingSimpleCPU::DcachePort::recvRetry()
838{
839    // we shouldn't get a retry unless we have a packet that we're
840    // waiting to transmit
841    assert(cpu->dcache_pkt != NULL);
842    assert(cpu->_status == DcacheRetry);
843    PacketPtr tmp = cpu->dcache_pkt;
844    if (tmp->senderState) {
845        // This is a packet from a split access.
846        SplitFragmentSenderState * send_state =
847            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
848        assert(send_state);
849        PacketPtr big_pkt = send_state->bigPkt;
850
851        SplitMainSenderState * main_send_state =
852            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
853        assert(main_send_state);
854
855        if (sendTimingReq(tmp)) {
856            // If we were able to send without retrying, record that fact
857            // and try sending the other fragment.
858            send_state->clearFromParent();
859            int other_index = main_send_state->getPendingFragment();
860            if (other_index > 0) {
                // Issue the remaining fragment via the matching helper.
861                tmp = main_send_state->fragments[other_index];
862                cpu->dcache_pkt = tmp;
863                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
864                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
865                    main_send_state->fragments[other_index] = NULL;
866                }
867            } else {
868                cpu->_status = DcacheWaitResponse;
869                // memory system takes ownership of packet
870                cpu->dcache_pkt = NULL;
871            }
872        }
873    } else if (sendTimingReq(tmp)) {
874        cpu->_status = DcacheWaitResponse;
875        // memory system takes ownership of packet
876        cpu->dcache_pkt = NULL;
877    }
878}
879
// Event used to delay completion of a memory-mapped IPR access.
880TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
881                                    Tick t)
882    : pkt(_pkt), cpu(_cpu)
883{
    // Schedule ourselves at time t to deliver the IPR response.
884    cpu->schedule(this, t);
885}
886
887void
888TimingSimpleCPU::IprEvent::process()
889{
    // Deliver the delayed memory-mapped IPR result to the CPU.
890    cpu->completeDataAccess(pkt);
891}
892
893const char *
894TimingSimpleCPU::IprEvent::description() const
895{
    // Human-readable event name for tracing/debugging.
896    return "Timing Simple CPU Delay IPR event";
897}
898
899
900void
901TimingSimpleCPU::printAddr(Addr a)
902{
    // Debug aid: delegate address printing to the D-cache port.
903    dcachePort.printAddr(a);
904}
905
906
907////////////////////////////////////////////////////////////////////////
908//
909// TimingSimpleCPU Simulation Object
910//
911TimingSimpleCPU *
912TimingSimpleCPUParams::create()
913{
    // Factory called from the Python config; this CPU model supports
    // exactly one thread (and one workload in SE mode).
914    numThreads = 1;
915    if (!FullSystem && workload.size() != 1)
916        panic("only one workload allowed");
917    return new TimingSimpleCPU(this);
918}
811
812 if (next_tick == curTick()) {
813 cpu->completeDataAccess(pkt);
814 } else {
815 if (!tickEvent.scheduled()) {
816 tickEvent.schedule(pkt, next_tick);
817 } else {
818 // In the case of a split transaction and a cache that is
819 // faster than a CPU we could get two responses before
820 // next_tick expires
821 if (!retryEvent.scheduled())
822 cpu->schedule(retryEvent, next_tick);
823 return false;
824 }
825 }
826
827 return true;
828}
829
830void
831TimingSimpleCPU::DcachePort::DTickEvent::process()
832{
    // Deferred D-cache response: hand the packet to the CPU now.
833    cpu->completeDataAccess(pkt);
834}
835
836void
837TimingSimpleCPU::DcachePort::recvRetry()
838{
839    // we shouldn't get a retry unless we have a packet that we're
840    // waiting to transmit
841    assert(cpu->dcache_pkt != NULL);
842    assert(cpu->_status == DcacheRetry);
843    PacketPtr tmp = cpu->dcache_pkt;
844    if (tmp->senderState) {
845        // This is a packet from a split access.
846        SplitFragmentSenderState * send_state =
847            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
848        assert(send_state);
849        PacketPtr big_pkt = send_state->bigPkt;
850
851        SplitMainSenderState * main_send_state =
852            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
853        assert(main_send_state);
854
855        if (sendTimingReq(tmp)) {
856            // If we were able to send without retrying, record that fact
857            // and try sending the other fragment.
858            send_state->clearFromParent();
859            int other_index = main_send_state->getPendingFragment();
860            if (other_index > 0) {
                // Issue the remaining fragment via the matching helper.
861                tmp = main_send_state->fragments[other_index];
862                cpu->dcache_pkt = tmp;
863                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
864                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
865                    main_send_state->fragments[other_index] = NULL;
866                }
867            } else {
868                cpu->_status = DcacheWaitResponse;
869                // memory system takes ownership of packet
870                cpu->dcache_pkt = NULL;
871            }
872        }
873    } else if (sendTimingReq(tmp)) {
874        cpu->_status = DcacheWaitResponse;
875        // memory system takes ownership of packet
876        cpu->dcache_pkt = NULL;
877    }
878}
879
// Event used to delay completion of a memory-mapped IPR access.
880TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
881                                    Tick t)
882    : pkt(_pkt), cpu(_cpu)
883{
    // Schedule ourselves at time t to deliver the IPR response.
884    cpu->schedule(this, t);
885}
886
887void
888TimingSimpleCPU::IprEvent::process()
889{
    // Deliver the delayed memory-mapped IPR result to the CPU.
890    cpu->completeDataAccess(pkt);
891}
892
893const char *
894TimingSimpleCPU::IprEvent::description() const
895{
    // Human-readable event name for tracing/debugging.
896    return "Timing Simple CPU Delay IPR event";
897}
898
899
900void
901TimingSimpleCPU::printAddr(Addr a)
902{
    // Debug aid: delegate address printing to the D-cache port.
903    dcachePort.printAddr(a);
904}
905
906
907////////////////////////////////////////////////////////////////////////
908//
909// TimingSimpleCPU Simulation Object
910//
911TimingSimpleCPU *
912TimingSimpleCPUParams::create()
913{
    // Factory called from the Python config; this CPU model supports
    // exactly one thread (and one workload in SE mode).
914    numThreads = 1;
915    if (!FullSystem && workload.size() != 1)
916        panic("only one workload allowed");
917    return new TimingSimpleCPU(this);
918}