timing.cc (8921:e53972f72165)
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/ExecFaulting.hh"
52#include "debug/SimpleCPU.hh"
53#include "mem/packet.hh"
54#include "mem/packet_access.hh"
55#include "params/TimingSimpleCPU.hh"
56#include "sim/faults.hh"
57#include "sim/full_system.hh"
58#include "sim/system.hh"
59
60using namespace std;
61using namespace TheISA;
62
63void
64TimingSimpleCPU::init()
65{
66 BaseCPU::init();
67
68 // Initialise the ThreadContext's memory proxies
69 tcBase()->initMemProxies(tcBase());
70
71 if (FullSystem) {
72 for (int i = 0; i < threadContexts.size(); ++i) {
73 ThreadContext *tc = threadContexts[i];
74 // initialize CPU, including PC
75 TheISA::initCPU(tc, _cpuId);
76 }
77 }
78}
79
80void
81TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
82{
83 pkt = _pkt;
84 cpu->schedule(this, t);
85}
86
87TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
88 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
89 dcachePort(this), fetchEvent(this)
90{
91 _status = Idle;
92
93 ifetch_pkt = dcache_pkt = NULL;
94 drainEvent = NULL;
95 previousTick = 0;
96 changeState(SimObject::Running);
97 system->totalNumInsts = 0;
98}
99
100
101TimingSimpleCPU::~TimingSimpleCPU()
102{
103}
104
105void
106TimingSimpleCPU::serialize(ostream &os)
107{
108 SimObject::State so_state = SimObject::getState();
109 SERIALIZE_ENUM(so_state);
110 BaseSimpleCPU::serialize(os);
111}
112
113void
114TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
115{
116 SimObject::State so_state;
117 UNSERIALIZE_ENUM(so_state);
118 BaseSimpleCPU::unserialize(cp, section);
119}
120
121unsigned int
122TimingSimpleCPU::drain(Event *drain_event)
123{
124 // TimingSimpleCPU is ready to drain if it's not waiting for
125 // an access to complete.
126 if (_status == Idle || _status == Running || _status == SwitchedOut) {
127 changeState(SimObject::Drained);
128 return 0;
129 } else {
130 changeState(SimObject::Draining);
131 drainEvent = drain_event;
132 return 1;
133 }
134}
135
136void
137TimingSimpleCPU::resume()
138{
139 DPRINTF(SimpleCPU, "Resume\n");
140 if (_status != SwitchedOut && _status != Idle) {
141 assert(system->getMemoryMode() == Enums::timing);
142
143 if (fetchEvent.scheduled())
144 deschedule(fetchEvent);
145
146 schedule(fetchEvent, nextCycle());
147 }
148
149 changeState(SimObject::Running);
150}
151
152void
153TimingSimpleCPU::switchOut()
154{
155 assert(_status == Running || _status == Idle);
156 _status = SwitchedOut;
157 numCycles += tickToCycles(curTick() - previousTick);
158
159 // If we've been scheduled to resume but are then told to switch out,
160 // we'll need to cancel it.
161 if (fetchEvent.scheduled())
162 deschedule(fetchEvent);
163}
164
165
166void
167TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
168{
169 BaseCPU::takeOverFrom(oldCPU);
170
171 // if any of this CPU's ThreadContexts are active, mark the CPU as
172 // running and schedule its tick event.
173 for (int i = 0; i < threadContexts.size(); ++i) {
174 ThreadContext *tc = threadContexts[i];
175 if (tc->status() == ThreadContext::Active && _status != Running) {
176 _status = Running;
177 break;
178 }
179 }
180
181 if (_status != Running) {
182 _status = Idle;
183 }
184 assert(threadContexts.size() == 1);
185 previousTick = curTick();
186}
187
188
189void
190TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
191{
192 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
193
194 assert(thread_num == 0);
195 assert(thread);
196
197 assert(_status == Idle);
198
199 notIdleFraction++;
200 _status = Running;
201
202 // kick things off by initiating the fetch of the next instruction
203 schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
204}
205
206
207void
208TimingSimpleCPU::suspendContext(ThreadID thread_num)
209{
210 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
211
212 assert(thread_num == 0);
213 assert(thread);
214
215 if (_status == Idle)
216 return;
217
218 assert(_status == Running);
219
220 // just change status to Idle... if status != Running,
221 // completeInst() will not initiate fetch of next instruction.
222
223 notIdleFraction--;
224 _status = Idle;
225}
226
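// Try to issue a read packet to the dcache, treating memory-mapped IPR
// accesses specially.  Returns true once the memory system (or the IPR
// event) owns the packet; false means we are waiting for a retry.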
227bool
228TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
229{
230 RequestPtr req = pkt->req;
231 if (req->isMmappedIpr()) {
232 Tick delay;
233 delay = TheISA::handleIprRead(thread->getTC(), pkt);
234 new IprEvent(pkt, this, nextCycle(curTick() + delay));
235 _status = DcacheWaitResponse;
236 dcache_pkt = NULL;
237 } else if (!dcachePort.sendTiming(pkt)) {
238 _status = DcacheRetry;
239 dcache_pkt = pkt;
240 } else {
241 _status = DcacheWaitResponse;
242 // memory system takes ownership of packet
243 dcache_pkt = NULL;
244 }
245 return dcache_pkt == NULL;
246}
247
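// Issue a single (non-split) access once its translation has finished:
// complete NO_ACCESS requests locally, send reads straight to the cache,
// and do LL/SC and conditional-swap bookkeeping before sending writes.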
248void
249TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
250 bool read)
251{
252 PacketPtr pkt;
253 buildPacket(pkt, req, read);
254 pkt->dataDynamicArray<uint8_t>(data);
255 if (req->getFlags().isSet(Request::NO_ACCESS)) {
256 assert(!dcache_pkt);
257 pkt->makeResponse();
258 completeDataAccess(pkt);
259 } else if (read) {
260 handleReadPacket(pkt);
261 } else {
262        bool do_access = true;  // set to false to suppress the cache access
263
264 if (req->isLLSC()) {
265 do_access = TheISA::handleLockedWrite(thread, req);
266 } else if (req->isCondSwap()) {
267 assert(res);
268 req->setExtraData(*res);
269 }
270
271 if (do_access) {
272 dcache_pkt = pkt;
273 handleWritePacket();
274 } else {
275 _status = DcacheWaitResponse;
276 completeDataAccess(pkt);
277 }
278 }
279}
280
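// Issue both fragments of an access that straddles a cache block
// boundary, stopping at the first fragment that has to wait for a retry.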
281void
282TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
283 RequestPtr req, uint8_t *data, bool read)
284{
285 PacketPtr pkt1, pkt2;
286 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
287 if (req->getFlags().isSet(Request::NO_ACCESS)) {
288 assert(!dcache_pkt);
289 pkt1->makeResponse();
290 completeDataAccess(pkt1);
291 } else if (read) {
292 SplitFragmentSenderState * send_state =
293 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
294 if (handleReadPacket(pkt1)) {
295 send_state->clearFromParent();
296 send_state = dynamic_cast<SplitFragmentSenderState *>(
297 pkt2->senderState);
298 if (handleReadPacket(pkt2)) {
299 send_state->clearFromParent();
300 }
301 }
302 } else {
303 dcache_pkt = pkt1;
304 SplitFragmentSenderState * send_state =
305 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
306 if (handleWritePacket()) {
307 send_state->clearFromParent();
308 dcache_pkt = pkt2;
309 send_state = dynamic_cast<SplitFragmentSenderState *>(
310 pkt2->senderState);
311 if (handleWritePacket()) {
312 send_state->clearFromParent();
313 }
314 }
315 }
316}
317
318void
319TimingSimpleCPU::translationFault(Fault fault)
320{
321 // fault may be NoFault in cases where a fault is suppressed,
322 // for instance prefetches.
323 numCycles += tickToCycles(curTick() - previousTick);
324 previousTick = curTick();
325
326 if (traceData) {
327 // Since there was a fault, we shouldn't trace this instruction.
328 delete traceData;
329 traceData = NULL;
330 }
331
332 postExecute();
333
334 if (getState() == SimObject::Draining) {
335 advancePC(fault);
336 completeDrain();
337 } else {
338 advanceInst(fault);
339 }
340}
341
342void
343TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
344{
345 MemCmd cmd;
346 if (read) {
347 cmd = MemCmd::ReadReq;
348 if (req->isLLSC())
349 cmd = MemCmd::LoadLockedReq;
350 } else {
351 cmd = MemCmd::WriteReq;
352 if (req->isLLSC()) {
353 cmd = MemCmd::StoreCondReq;
354 } else if (req->isSwap()) {
355 cmd = MemCmd::SwapReq;
356 }
357 }
358 pkt = new Packet(req, cmd, Packet::Broadcast);
359}
360
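// Build the two fragment packets for a split access plus a main packet
// that owns the data buffer; SenderState objects tie the fragments back
// to the main packet so their responses can be merged.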
361void
362TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
363 RequestPtr req1, RequestPtr req2, RequestPtr req,
364 uint8_t *data, bool read)
365{
366 pkt1 = pkt2 = NULL;
367
368 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
369
370 if (req->getFlags().isSet(Request::NO_ACCESS)) {
371 buildPacket(pkt1, req, read);
372 return;
373 }
374
375 buildPacket(pkt1, req1, read);
376 buildPacket(pkt2, req2, read);
377
378 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
379 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
380 Packet::Broadcast);
381
382 pkt->dataDynamicArray<uint8_t>(data);
383 pkt1->dataStatic<uint8_t>(data);
384 pkt2->dataStatic<uint8_t>(data + req1->getSize());
385
386 SplitMainSenderState * main_send_state = new SplitMainSenderState;
387 pkt->senderState = main_send_state;
388 main_send_state->fragments[0] = pkt1;
389 main_send_state->fragments[1] = pkt2;
390 main_send_state->outstanding = 2;
391 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
392 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
393}
394
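// Start a timing-mode load: build the request, split it if it crosses a
// cache block boundary, and kick off the timing TLB translation(s).  The
// cache access itself is issued later from finishTranslation().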
395Fault
396TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
397 unsigned size, unsigned flags)
398{
399 Fault fault;
400 const int asid = 0;
401 const ThreadID tid = 0;
402 const Addr pc = thread->instAddr();
403 unsigned block_size = dcachePort.peerBlockSize();
404 BaseTLB::Mode mode = BaseTLB::Read;
405
406 if (traceData) {
407 traceData->setAddr(addr);
408 }
409
410 RequestPtr req = new Request(asid, addr, size,
411 flags, dataMasterId(), pc, _cpuId, tid);
412
413 Addr split_addr = roundDown(addr + size - 1, block_size);
414 assert(split_addr <= addr || split_addr - addr < block_size);
415
416 _status = DTBWaitResponse;
417 if (split_addr > addr) {
418 RequestPtr req1, req2;
419 assert(!req->isLLSC() && !req->isSwap());
420 req->splitOnVaddr(split_addr, req1, req2);
421
422 WholeTranslationState *state =
423 new WholeTranslationState(req, req1, req2, new uint8_t[size],
424 NULL, mode);
425 DataTranslation<TimingSimpleCPU *> *trans1 =
426 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
427 DataTranslation<TimingSimpleCPU *> *trans2 =
428 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
429
430 thread->dtb->translateTiming(req1, tc, trans1, mode);
431 thread->dtb->translateTiming(req2, tc, trans2, mode);
432 } else {
433 WholeTranslationState *state =
434 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
435 DataTranslation<TimingSimpleCPU *> *translation
436 = new DataTranslation<TimingSimpleCPU *>(this, state);
437 thread->dtb->translateTiming(req, tc, translation, mode);
438 }
439
440 return NoFault;
441}
442
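// Counterpart of handleReadPacket() for stores: try to send dcache_pkt
// to the dcache (or handle it as a memory-mapped IPR write) and return
// true if the packet was accepted.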
443bool
444TimingSimpleCPU::handleWritePacket()
445{
446 RequestPtr req = dcache_pkt->req;
447 if (req->isMmappedIpr()) {
448 Tick delay;
449 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
450 new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
451 _status = DcacheWaitResponse;
452 dcache_pkt = NULL;
453 } else if (!dcachePort.sendTiming(dcache_pkt)) {
454 _status = DcacheRetry;
455 } else {
456 _status = DcacheWaitResponse;
457 // memory system takes ownership of packet
458 dcache_pkt = NULL;
459 }
460 return dcache_pkt == NULL;
461}
462
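// Start a timing-mode store.  The data is copied up front because the
// write only completes after translation and the dcache response;
// otherwise this mirrors readMem().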
463Fault
464TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
465 Addr addr, unsigned flags, uint64_t *res)
466{
467 uint8_t *newData = new uint8_t[size];
468 memcpy(newData, data, size);
469
470 const int asid = 0;
471 const ThreadID tid = 0;
472 const Addr pc = thread->instAddr();
473 unsigned block_size = dcachePort.peerBlockSize();
474 BaseTLB::Mode mode = BaseTLB::Write;
475
476 if (traceData) {
477 traceData->setAddr(addr);
478 }
479
480 RequestPtr req = new Request(asid, addr, size,
481 flags, dataMasterId(), pc, _cpuId, tid);
482
483 Addr split_addr = roundDown(addr + size - 1, block_size);
484 assert(split_addr <= addr || split_addr - addr < block_size);
485
486 _status = DTBWaitResponse;
487 if (split_addr > addr) {
488 RequestPtr req1, req2;
489 assert(!req->isLLSC() && !req->isSwap());
490 req->splitOnVaddr(split_addr, req1, req2);
491
492 WholeTranslationState *state =
493 new WholeTranslationState(req, req1, req2, newData, res, mode);
494 DataTranslation<TimingSimpleCPU *> *trans1 =
495 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
496 DataTranslation<TimingSimpleCPU *> *trans2 =
497 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
498
499 thread->dtb->translateTiming(req1, tc, trans1, mode);
500 thread->dtb->translateTiming(req2, tc, trans2, mode);
501 } else {
502 WholeTranslationState *state =
503 new WholeTranslationState(req, newData, res, mode);
504 DataTranslation<TimingSimpleCPU *> *translation =
505 new DataTranslation<TimingSimpleCPU *>(this, state);
506 thread->dtb->translateTiming(req, tc, translation, mode);
507 }
508
509 // Translation faults will be returned via finishTranslation()
510 return NoFault;
511}
512
513
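// Callback from the data TLB once every translation for an access has
// finished: clean up and report any fault, otherwise issue the single or
// split access.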
514void
515TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
516{
517 _status = Running;
518
519 if (state->getFault() != NoFault) {
520 if (state->isPrefetch()) {
521 state->setNoFault();
522 }
523 delete [] state->data;
524 state->deleteReqs();
525 translationFault(state->getFault());
526 } else {
527 if (!state->isSplit) {
528 sendData(state->mainReq, state->data, state->res,
529 state->mode == BaseTLB::Read);
530 } else {
531 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
532 state->data, state->mode == BaseTLB::Read);
533 }
534 }
535
536 delete state;
537}
538
539
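// Begin fetching the next instruction: check interrupts and PC events,
// then either start a timing ITB translation or, when executing
// microcode, skip the icache and call completeIfetch(NULL) directly.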
540void
541TimingSimpleCPU::fetch()
542{
543 DPRINTF(SimpleCPU, "Fetch\n");
544
545 if (!curStaticInst || !curStaticInst->isDelayedCommit())
546 checkForInterrupts();
547
548 checkPcEventQueue();
549
550 // We must have just got suspended by a PC event
551 if (_status == Idle)
552 return;
553
554 TheISA::PCState pcState = thread->pcState();
555 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
556
557 if (needToFetch) {
558 _status = Running;
559 Request *ifetch_req = new Request();
560 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
561 setupFetchRequest(ifetch_req);
562 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
563 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
564 BaseTLB::Execute);
565 } else {
566 _status = IcacheWaitResponse;
567 completeIfetch(NULL);
568
569 numCycles += tickToCycles(curTick() - previousTick);
570 previousTick = curTick();
571 }
572}
573
574
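// Callback from the ITB: on a successful translation send the fetch
// packet to the icache (waiting for a retry if necessary); on a fault,
// skip the access and advance straight to the fault handler.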
575void
576TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
577{
578 if (fault == NoFault) {
579 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
580 req->getVaddr(), req->getPaddr());
581 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
582 ifetch_pkt->dataStatic(&inst);
583 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
584
585 if (!icachePort.sendTiming(ifetch_pkt)) {
586 // Need to wait for retry
587 _status = IcacheRetry;
588 } else {
589 // Need to wait for cache to respond
590 _status = IcacheWaitResponse;
591 // ownership of packet transferred to memory system
592 ifetch_pkt = NULL;
593 }
594 } else {
595 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
596 delete req;
597 // fetch fault: advance directly to next instruction (fault handler)
598 _status = Running;
599 advanceInst(fault);
600 }
601
602 numCycles += tickToCycles(curTick() - previousTick);
603 previousTick = curTick();
604}
605
606
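// Move on to the next instruction: a fault redirects the PC and
// schedules a fetch for the next cycle; otherwise advance the PC and
// fetch again if the CPU is still Running.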
607void
608TimingSimpleCPU::advanceInst(Fault fault)
609{
610
611 if (_status == Faulting)
612 return;
613
614 if (fault != NoFault) {
615 advancePC(fault);
616        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
617 reschedule(fetchEvent, nextCycle(), true);
618 _status = Faulting;
619 return;
620 }
621
622
623 if (!stayAtPC)
624 advancePC(fault);
625
626 if (_status == Running) {
627 // kick off fetch of next instruction... callback from icache
628 // response will cause that instruction to be executed,
629 // keeping the CPU running.
630 fetch();
631 }
632}
633
634
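// Icache response handler: decode the fetched instruction and either
// initiate its memory access (loads/stores) or execute it completely,
// then advance to the next instruction.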
635void
636TimingSimpleCPU::completeIfetch(PacketPtr pkt)
637{
638 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
639 pkt->getAddr() : 0);
640
641 // received a response from the icache: execute the received
642 // instruction
643
644 assert(!pkt || !pkt->isError());
645 assert(_status == IcacheWaitResponse);
646
647 _status = Running;
648
649 numCycles += tickToCycles(curTick() - previousTick);
650 previousTick = curTick();
651
652 if (getState() == SimObject::Draining) {
653 if (pkt) {
654 delete pkt->req;
655 delete pkt;
656 }
657
658 completeDrain();
659 return;
660 }
661
662 preExecute();
663 if (curStaticInst && curStaticInst->isMemRef()) {
664 // load or store: just send to dcache
665 Fault fault = curStaticInst->initiateAcc(this, traceData);
666
667        // If we're not running now, the instruction will complete in a
668        // dcache response callback, or the instruction faulted and has
669        // started an ifetch.
670 if (_status == Running) {
671 if (fault != NoFault && traceData) {
672 // If there was a fault, we shouldn't trace this instruction.
673 delete traceData;
674 traceData = NULL;
675 }
676
677 postExecute();
678 // @todo remove me after debugging with legion done
679 if (curStaticInst && (!curStaticInst->isMicroop() ||
680 curStaticInst->isFirstMicroop()))
681 instCnt++;
682 advanceInst(fault);
683 }
684 } else if (curStaticInst) {
685 // non-memory instruction: execute completely now
686 Fault fault = curStaticInst->execute(this, traceData);
687
688 // keep an instruction count
689 if (fault == NoFault)
690 countInst();
691 else if (traceData && !DTRACE(ExecFaulting)) {
692 delete traceData;
693 traceData = NULL;
694 }
695
696 postExecute();
697 // @todo remove me after debugging with legion done
698 if (curStaticInst && (!curStaticInst->isMicroop() ||
699 curStaticInst->isFirstMicroop()))
700 instCnt++;
701 advanceInst(fault);
702 } else {
703 advanceInst(NoFault);
704 }
705
706 if (pkt) {
707 delete pkt->req;
708 delete pkt;
709 }
710}
711
712void
713TimingSimpleCPU::IcachePort::ITickEvent::process()
714{
715 cpu->completeIfetch(pkt);
716}
717
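// Timing response from the icache: process it on the next CPU clock
// edge, resend nacked fetches, and ignore snooped requests.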
718bool
719TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
720{
721 if (pkt->isResponse() && !pkt->wasNacked()) {
722 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
723 // delay processing of returned data until next CPU clock edge
724 Tick next_tick = cpu->nextCycle(curTick());
725
726 if (next_tick == curTick())
727 cpu->completeIfetch(pkt);
728 else
729 tickEvent.schedule(pkt, next_tick);
730
731 return true;
732 } else if (pkt->wasNacked()) {
733 assert(cpu->_status == IcacheWaitResponse);
734 pkt->reinitNacked();
735 if (!sendTiming(pkt)) {
736 cpu->_status = IcacheRetry;
737 cpu->ifetch_pkt = pkt;
738 }
739 }
740    // Snooping a coherence request, do nothing
741 return true;
742}
743
744void
745TimingSimpleCPU::IcachePort::recvRetry()
746{
747 // we shouldn't get a retry unless we have a packet that we're
748 // waiting to transmit
749 assert(cpu->ifetch_pkt != NULL);
750 assert(cpu->_status == IcacheRetry);
751 PacketPtr tmp = cpu->ifetch_pkt;
752 if (sendTiming(tmp)) {
753 cpu->_status = IcacheWaitResponse;
754 cpu->ifetch_pkt = NULL;
755 }
756}
757
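// Dcache response handler: for split accesses wait until both fragments
// have returned, then complete the load/store, update stats, and advance
// to the next instruction.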
758void
759TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
760{
761 // received a response from the dcache: complete the load or store
762 // instruction
763 assert(!pkt->isError());
764 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
765 pkt->req->getFlags().isSet(Request::NO_ACCESS));
766
767 numCycles += tickToCycles(curTick() - previousTick);
768 previousTick = curTick();
769
770 if (pkt->senderState) {
771 SplitFragmentSenderState * send_state =
772 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
773 assert(send_state);
774 delete pkt->req;
775 delete pkt;
776 PacketPtr big_pkt = send_state->bigPkt;
777 delete send_state;
778
779 SplitMainSenderState * main_send_state =
780 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
781 assert(main_send_state);
782 // Record the fact that this packet is no longer outstanding.
783 assert(main_send_state->outstanding != 0);
784 main_send_state->outstanding--;
785
786 if (main_send_state->outstanding) {
787 return;
788 } else {
789 delete main_send_state;
790 big_pkt->senderState = NULL;
791 pkt = big_pkt;
792 }
793 }
794
795 _status = Running;
796
797 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
798
799 // keep an instruction count
800 if (fault == NoFault)
801 countInst();
802 else if (traceData) {
803 // If there was a fault, we shouldn't trace this instruction.
804 delete traceData;
805 traceData = NULL;
806 }
807
808 // the locked flag may be cleared on the response packet, so check
809 // pkt->req and not pkt to see if it was a load-locked
810 if (pkt->isRead() && pkt->req->isLLSC()) {
811 TheISA::handleLockedRead(thread, pkt->req);
812 }
813
814 delete pkt->req;
815 delete pkt;
816
817 postExecute();
818
819 if (getState() == SimObject::Draining) {
820 advancePC(fault);
821 completeDrain();
822
823 return;
824 }
825
826 advanceInst(fault);
827}
828
829
830void
831TimingSimpleCPU::completeDrain()
832{
833 DPRINTF(Config, "Done draining\n");
834 changeState(SimObject::Drained);
835 drainEvent->process();
836}
837
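// Timing response from the dcache: process it on the next CPU clock
// edge (deferring a second response if one is already pending), resend
// nacked packets, and ignore snooped requests.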
838bool
839TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
840{
841 if (pkt->isResponse() && !pkt->wasNacked()) {
842 // delay processing of returned data until next CPU clock edge
843 Tick next_tick = cpu->nextCycle(curTick());
844
845 if (next_tick == curTick()) {
846 cpu->completeDataAccess(pkt);
847 } else {
848 if (!tickEvent.scheduled()) {
849 tickEvent.schedule(pkt, next_tick);
850 } else {
851 // In the case of a split transaction and a cache that is
852 // faster than a CPU we could get two responses before
853 // next_tick expires
854 if (!retryEvent.scheduled())
855 cpu->schedule(retryEvent, next_tick);
856 return false;
857 }
858 }
859
860 return true;
861 }
862 else if (pkt->wasNacked()) {
863 assert(cpu->_status == DcacheWaitResponse);
864 pkt->reinitNacked();
865 if (!sendTiming(pkt)) {
866 cpu->_status = DcacheRetry;
867 cpu->dcache_pkt = pkt;
868 }
869 }
870    // Snooping a coherence request, do nothing
871 return true;
872}
873
874void
875TimingSimpleCPU::DcachePort::DTickEvent::process()
876{
877 cpu->completeDataAccess(pkt);
878}
879
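// Retry from the dcache: resend the pending packet; for a split access,
// also try to send the remaining fragment.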
880void
881TimingSimpleCPU::DcachePort::recvRetry()
882{
883 // we shouldn't get a retry unless we have a packet that we're
884 // waiting to transmit
885 assert(cpu->dcache_pkt != NULL);
886 assert(cpu->_status == DcacheRetry);
887 PacketPtr tmp = cpu->dcache_pkt;
888 if (tmp->senderState) {
889 // This is a packet from a split access.
890 SplitFragmentSenderState * send_state =
891 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
892 assert(send_state);
893 PacketPtr big_pkt = send_state->bigPkt;
894
895 SplitMainSenderState * main_send_state =
896 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
897 assert(main_send_state);
898
899 if (sendTiming(tmp)) {
900 // If we were able to send without retrying, record that fact
901 // and try sending the other fragment.
902 send_state->clearFromParent();
903 int other_index = main_send_state->getPendingFragment();
904 if (other_index > 0) {
905 tmp = main_send_state->fragments[other_index];
906 cpu->dcache_pkt = tmp;
907 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
908 (big_pkt->isWrite() && cpu->handleWritePacket())) {
909 main_send_state->fragments[other_index] = NULL;
910 }
911 } else {
912 cpu->_status = DcacheWaitResponse;
913 // memory system takes ownership of packet
914 cpu->dcache_pkt = NULL;
915 }
916 }
917 } else if (sendTiming(tmp)) {
918 cpu->_status = DcacheWaitResponse;
919 // memory system takes ownership of packet
920 cpu->dcache_pkt = NULL;
921 }
922}
923
924TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
925 Tick t)
926 : pkt(_pkt), cpu(_cpu)
927{
928 cpu->schedule(this, t);
929}
930
931void
932TimingSimpleCPU::IprEvent::process()
933{
934 cpu->completeDataAccess(pkt);
935}
936
937const char *
938TimingSimpleCPU::IprEvent::description() const
939{
940 return "Timing Simple CPU Delay IPR event";
941}
942
943
944void
945TimingSimpleCPU::printAddr(Addr a)
946{
947 dcachePort.printAddr(a);
948}
949
950
951////////////////////////////////////////////////////////////////////////
952//
953// TimingSimpleCPU Simulation Object
954//
955TimingSimpleCPU *
956TimingSimpleCPUParams::create()
957{
958 numThreads = 1;
959 if (!FullSystem && workload.size() != 1)
960 panic("only one workload allowed");
961 return new TimingSimpleCPU(this);
962}