timing.cc, revision 8276:66bb0d8ae8bf (updated from revision 8232:b28d06a175be)
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmapped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/simple/timing.hh"
49#include "cpu/exetrace.hh"
50#include "debug/Config.hh"
51#include "debug/ExecFaulting.hh"
52#include "debug/SimpleCPU.hh"
53#include "mem/packet.hh"
54#include "mem/packet_access.hh"
55#include "params/TimingSimpleCPU.hh"
56#include "sim/faults.hh"
57#include "sim/system.hh"
58
59using namespace std;
60using namespace TheISA;
61
62Port *
63TimingSimpleCPU::getPort(const std::string &if_name, int idx)
64{
65 if (if_name == "dcache_port")
66 return &dcachePort;
67 else if (if_name == "icache_port")
68 return &icachePort;
69 else
70 panic("No Such Port\n");
71}
72
73void
74TimingSimpleCPU::init()
75{
76 BaseCPU::init();
77#if FULL_SYSTEM
78 for (int i = 0; i < threadContexts.size(); ++i) {
79 ThreadContext *tc = threadContexts[i];
80
81 // initialize CPU, including PC
82 TheISA::initCPU(tc, _cpuId);
83 }
84#endif
85}
86
87Tick
88TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
89{
90 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
91 return curTick();
92}
93
94void
95TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
96{
97 // No internal storage to update, just return
98 return;
99}
100
101void
102TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
103{
104 if (status == RangeChange) {
105 if (!snoopRangeSent) {
106 snoopRangeSent = true;
107 sendStatusChange(Port::RangeChange);
108 }
109 return;
110 }
111
112 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
113}
114
115
116void
117TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
118{
119 pkt = _pkt;
120 cpu->schedule(this, t);
121}
122
123TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
124 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
125 dcachePort(this, p->clock), fetchEvent(this)
126{
127 _status = Idle;
128
129 icachePort.snoopRangeSent = false;
130 dcachePort.snoopRangeSent = false;
131
132 ifetch_pkt = dcache_pkt = NULL;
133 drainEvent = NULL;
134 previousTick = 0;
135 changeState(SimObject::Running);
136 system->totalNumInsts = 0;
137}
138
139
140TimingSimpleCPU::~TimingSimpleCPU()
141{
142}
143
144void
145TimingSimpleCPU::serialize(ostream &os)
146{
147 SimObject::State so_state = SimObject::getState();
148 SERIALIZE_ENUM(so_state);
149 BaseSimpleCPU::serialize(os);
150}
151
152void
153TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
154{
155 SimObject::State so_state;
156 UNSERIALIZE_ENUM(so_state);
157 BaseSimpleCPU::unserialize(cp, section);
158}
159
160unsigned int
161TimingSimpleCPU::drain(Event *drain_event)
162{
163 // TimingSimpleCPU is ready to drain if it's not waiting for
164 // an access to complete.
165 if (_status == Idle || _status == Running || _status == SwitchedOut) {
166 changeState(SimObject::Drained);
167 return 0;
168 } else {
169 changeState(SimObject::Draining);
170 drainEvent = drain_event;
171 return 1;
172 }
173}
174
175void
176TimingSimpleCPU::resume()
177{
178 DPRINTF(SimpleCPU, "Resume\n");
179 if (_status != SwitchedOut && _status != Idle) {
180 assert(system->getMemoryMode() == Enums::timing);
181
182 if (fetchEvent.scheduled())
183 deschedule(fetchEvent);
184
185 schedule(fetchEvent, nextCycle());
186 }
187
188 changeState(SimObject::Running);
189}
190
191void
192TimingSimpleCPU::switchOut()
193{
194 assert(_status == Running || _status == Idle);
195 _status = SwitchedOut;
196 numCycles += tickToCycles(curTick() - previousTick);
197
198 // If we've been scheduled to resume but are then told to switch out,
199 // we'll need to cancel it.
200 if (fetchEvent.scheduled())
201 deschedule(fetchEvent);
202}
203
204
205void
206TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
207{
208 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
209
210 // if any of this CPU's ThreadContexts are active, mark the CPU as
211 // running and schedule its tick event.
212 for (int i = 0; i < threadContexts.size(); ++i) {
213 ThreadContext *tc = threadContexts[i];
214 if (tc->status() == ThreadContext::Active && _status != Running) {
215 _status = Running;
216 break;
217 }
218 }
219
220 if (_status != Running) {
221 _status = Idle;
222 }
223 assert(threadContexts.size() == 1);
224 previousTick = curTick();
225}
226
227
228void
229TimingSimpleCPU::activateContext(int thread_num, int delay)
230{
231 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
232
233 assert(thread_num == 0);
234 assert(thread);
235
236 assert(_status == Idle);
237
238 notIdleFraction++;
239 _status = Running;
240
241 // kick things off by initiating the fetch of the next instruction
242 schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
243}
244
245
246void
247TimingSimpleCPU::suspendContext(int thread_num)
248{
249 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
250
251 assert(thread_num == 0);
252 assert(thread);
253
254 if (_status == Idle)
255 return;
256
257 assert(_status == Running);
258
259 // just change status to Idle... if status != Running,
260 // completeInst() will not initiate fetch of next instruction.
261
262 notIdleFraction--;
263 _status = Idle;
264}
265
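// Issue a timing-mode read to the dcache.  Memory-mapped IPR reads are
// handled locally through an IprEvent; otherwise the packet goes out on
// dcachePort and is stashed in dcache_pkt if the port asks for a retry.
// Returns true once the CPU no longer owns the packet.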
266bool
267TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
268{
269 RequestPtr req = pkt->req;
270 if (req->isMmappedIpr()) {
271 Tick delay;
272 delay = TheISA::handleIprRead(thread->getTC(), pkt);
273 new IprEvent(pkt, this, nextCycle(curTick() + delay));
274 _status = DcacheWaitResponse;
275 dcache_pkt = NULL;
276 } else if (!dcachePort.sendTiming(pkt)) {
277 _status = DcacheRetry;
278 dcache_pkt = pkt;
279 } else {
280 _status = DcacheWaitResponse;
281 // memory system takes ownership of packet
282 dcache_pkt = NULL;
283 }
284 return dcache_pkt == NULL;
285}
286
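// Called once translation of a non-split access has finished: build the
// packet and either complete it immediately (NO_ACCESS), start the read,
// or start the write, honoring LL/SC and conditional-swap semantics.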
287void
288TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
289 bool read)
290{
291 PacketPtr pkt;
292 buildPacket(pkt, req, read);
293 pkt->dataDynamicArray<uint8_t>(data);
294 if (req->getFlags().isSet(Request::NO_ACCESS)) {
295 assert(!dcache_pkt);
296 pkt->makeResponse();
297 completeDataAccess(pkt);
298 } else if (read) {
299 handleReadPacket(pkt);
300 } else {
301 bool do_access = true; // set to false to suppress the cache access
302
303 if (req->isLLSC()) {
304 do_access = TheISA::handleLockedWrite(thread, req);
305 } else if (req->isCondSwap()) {
306 assert(res);
307 req->setExtraData(*res);
308 }
309
310 if (do_access) {
311 dcache_pkt = pkt;
312 handleWritePacket();
313 } else {
314 _status = DcacheWaitResponse;
315 completeDataAccess(pkt);
316 }
317 }
318}
319
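// Counterpart of sendData() for accesses that cross a cache block
// boundary: the two fragment packets are sent back to back, and each
// fragment is detached from its parent once the memory system accepts it.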
320void
321TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
322 RequestPtr req, uint8_t *data, bool read)
323{
324 PacketPtr pkt1, pkt2;
325 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
326 if (req->getFlags().isSet(Request::NO_ACCESS)) {
327 assert(!dcache_pkt);
328 pkt1->makeResponse();
329 completeDataAccess(pkt1);
330 } else if (read) {
331 SplitFragmentSenderState * send_state =
332 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
333 if (handleReadPacket(pkt1)) {
334 send_state->clearFromParent();
335 send_state = dynamic_cast<SplitFragmentSenderState *>(
336 pkt2->senderState);
337 if (handleReadPacket(pkt2)) {
338 send_state->clearFromParent();
339 }
340 }
341 } else {
342 dcache_pkt = pkt1;
343 SplitFragmentSenderState * send_state =
344 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
345 if (handleWritePacket()) {
346 send_state->clearFromParent();
347 dcache_pkt = pkt2;
348 send_state = dynamic_cast<SplitFragmentSenderState *>(
349 pkt2->senderState);
350 if (handleWritePacket()) {
351 send_state->clearFromParent();
352 }
353 }
354 }
355}
356
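// A data translation faulted (or was suppressed, e.g. for a prefetch):
// account for the elapsed cycles, drop any trace record, and move on to
// the next instruction (or finish draining).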
357void
358TimingSimpleCPU::translationFault(Fault fault)
359{
360 // fault may be NoFault in cases where a fault is suppressed,
361 // for instance prefetches.
362 numCycles += tickToCycles(curTick() - previousTick);
363 previousTick = curTick();
364
365 if (traceData) {
366 // Since there was a fault, we shouldn't trace this instruction.
367 delete traceData;
368 traceData = NULL;
369 }
370
371 postExecute();
372
373 if (getState() == SimObject::Draining) {
374 advancePC(fault);
375 completeDrain();
376 } else {
377 advanceInst(fault);
378 }
379}
380
381void
382TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
383{
384 MemCmd cmd;
385 if (read) {
386 cmd = MemCmd::ReadReq;
387 if (req->isLLSC())
388 cmd = MemCmd::LoadLockedReq;
389 } else {
390 cmd = MemCmd::WriteReq;
391 if (req->isLLSC()) {
392 cmd = MemCmd::StoreCondReq;
393 } else if (req->isSwap()) {
394 cmd = MemCmd::SwapReq;
395 }
396 }
397 pkt = new Packet(req, cmd, Packet::Broadcast);
398}
399
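// Build the two fragment packets for a split access plus a "big" packet
// covering the whole request; sender-state objects tie the fragments to
// the big packet so the two responses can be matched up again.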
400void
401TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
402 RequestPtr req1, RequestPtr req2, RequestPtr req,
403 uint8_t *data, bool read)
404{
405 pkt1 = pkt2 = NULL;
406
407 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
408
409 if (req->getFlags().isSet(Request::NO_ACCESS)) {
410 buildPacket(pkt1, req, read);
411 return;
412 }
413
414 buildPacket(pkt1, req1, read);
415 buildPacket(pkt2, req2, read);
416
417 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
418 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
419 Packet::Broadcast);
420
421 pkt->dataDynamicArray<uint8_t>(data);
422 pkt1->dataStatic<uint8_t>(data);
423 pkt2->dataStatic<uint8_t>(data + req1->getSize());
424
425 SplitMainSenderState * main_send_state = new SplitMainSenderState;
426 pkt->senderState = main_send_state;
427 main_send_state->fragments[0] = pkt1;
428 main_send_state->fragments[1] = pkt2;
429 main_send_state->outstanding = 2;
430 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
431 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
432}
433
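// Start a timing read: create the request, split it if it crosses a cache
// block boundary, and hand it to the DTB for timing translation.  The
// cache access itself is issued later from finishTranslation().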
434Fault
435TimingSimpleCPU::readBytes(Addr addr, uint8_t *data,
436 unsigned size, unsigned flags)
437{
438 Fault fault;
439 const int asid = 0;
440 const ThreadID tid = 0;
441 const Addr pc = thread->instAddr();
442 unsigned block_size = dcachePort.peerBlockSize();
443 BaseTLB::Mode mode = BaseTLB::Read;
444
445 if (traceData) {
446 traceData->setAddr(addr);
447 }
448
449 RequestPtr req = new Request(asid, addr, size,
450 flags, pc, _cpuId, tid);
451
452 Addr split_addr = roundDown(addr + size - 1, block_size);
453 assert(split_addr <= addr || split_addr - addr < block_size);
454
455 _status = DTBWaitResponse;
456 if (split_addr > addr) {
457 RequestPtr req1, req2;
458 assert(!req->isLLSC() && !req->isSwap());
459 req->splitOnVaddr(split_addr, req1, req2);
460
461 WholeTranslationState *state =
462 new WholeTranslationState(req, req1, req2, new uint8_t[size],
463 NULL, mode);
464 DataTranslation<TimingSimpleCPU> *trans1 =
465 new DataTranslation<TimingSimpleCPU>(this, state, 0);
466 DataTranslation<TimingSimpleCPU> *trans2 =
467 new DataTranslation<TimingSimpleCPU>(this, state, 1);
468
469 thread->dtb->translateTiming(req1, tc, trans1, mode);
470 thread->dtb->translateTiming(req2, tc, trans2, mode);
471 } else {
472 WholeTranslationState *state =
473 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
474 DataTranslation<TimingSimpleCPU> *translation
475 = new DataTranslation<TimingSimpleCPU>(this, state);
476 thread->dtb->translateTiming(req, tc, translation, mode);
477 }
478
479 return NoFault;
480}
481
482template <class T>
483Fault
484TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
485{
486 return readBytes(addr, (uint8_t *)&data, sizeof(T), flags);
487}
488
489#ifndef DOXYGEN_SHOULD_SKIP_THIS
490
491template
492Fault
493TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
494
495template
496Fault
497TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
498
499template
500Fault
501TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
502
503template
504Fault
505TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
506
507template
508Fault
509TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
510
511template
512Fault
513TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
514
515#endif //DOXYGEN_SHOULD_SKIP_THIS
516
517template<>
518Fault
519TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
520{
521 return read(addr, *(uint64_t*)&data, flags);
522}
523
524template<>
525Fault
526TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
527{
528 return read(addr, *(uint32_t*)&data, flags);
529}
530
531template<>
532Fault
533TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
534{
535 return read(addr, (uint32_t&)data, flags);
536}
537
538bool
539TimingSimpleCPU::handleWritePacket()
540{
541 RequestPtr req = dcache_pkt->req;
542 if (req->isMmappedIpr()) {
543 Tick delay;
544 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
545 new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
546 _status = DcacheWaitResponse;
547 dcache_pkt = NULL;
548 } else if (!dcachePort.sendTiming(dcache_pkt)) {
549 _status = DcacheRetry;
550 } else {
551 _status = DcacheWaitResponse;
552 // memory system takes ownership of packet
553 dcache_pkt = NULL;
554 }
555 return dcache_pkt == NULL;
556}
557
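// Start a timing write, taking ownership of 'data'.  As in readBytes(),
// the request is split if necessary and handed to the DTB; the cache
// access is issued later from finishTranslation().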
558Fault
559TimingSimpleCPU::writeTheseBytes(uint8_t *data, unsigned size,
560 Addr addr, unsigned flags, uint64_t *res)
561{
562 const int asid = 0;
563 const ThreadID tid = 0;
564 const Addr pc = thread->instAddr();
565 unsigned block_size = dcachePort.peerBlockSize();
566 BaseTLB::Mode mode = BaseTLB::Write;
567
568 if (traceData) {
569 traceData->setAddr(addr);
570 }
571
572 RequestPtr req = new Request(asid, addr, size,
573 flags, pc, _cpuId, tid);
574
575 Addr split_addr = roundDown(addr + size - 1, block_size);
576 assert(split_addr <= addr || split_addr - addr < block_size);
577
578 _status = DTBWaitResponse;
579 if (split_addr > addr) {
580 RequestPtr req1, req2;
581 assert(!req->isLLSC() && !req->isSwap());
582 req->splitOnVaddr(split_addr, req1, req2);
583
584 WholeTranslationState *state =
585 new WholeTranslationState(req, req1, req2, data, res, mode);
586 DataTranslation<TimingSimpleCPU> *trans1 =
587 new DataTranslation<TimingSimpleCPU>(this, state, 0);
588 DataTranslation<TimingSimpleCPU> *trans2 =
589 new DataTranslation<TimingSimpleCPU>(this, state, 1);
590
591 thread->dtb->translateTiming(req1, tc, trans1, mode);
592 thread->dtb->translateTiming(req2, tc, trans2, mode);
593 } else {
594 WholeTranslationState *state =
595 new WholeTranslationState(req, data, res, mode);
596 DataTranslation<TimingSimpleCPU> *translation =
597 new DataTranslation<TimingSimpleCPU>(this, state);
598 thread->dtb->translateTiming(req, tc, translation, mode);
599 }
600
601 // Translation faults will be returned via finishTranslation()
602 return NoFault;
603}
604
605Fault
606TimingSimpleCPU::writeBytes(uint8_t *data, unsigned size,
607 Addr addr, unsigned flags, uint64_t *res)
608{
609 uint8_t *newData = new uint8_t[size];
610 memcpy(newData, data, size);
611 return writeTheseBytes(newData, size, addr, flags, res);
612}
613
614template <class T>
615Fault
616TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
617{
618 if (traceData) {
619 traceData->setData(data);
620 }
621 T *dataP = (T*) new uint8_t[sizeof(T)];
622 *dataP = TheISA::htog(data);
623
624 return writeTheseBytes((uint8_t *)dataP, sizeof(T), addr, flags, res);
625}
626
627
628#ifndef DOXYGEN_SHOULD_SKIP_THIS
629template
630Fault
631TimingSimpleCPU::write(Twin32_t data, Addr addr,
632 unsigned flags, uint64_t *res);
633
634template
635Fault
636TimingSimpleCPU::write(Twin64_t data, Addr addr,
637 unsigned flags, uint64_t *res);
638
639template
640Fault
641TimingSimpleCPU::write(uint64_t data, Addr addr,
642 unsigned flags, uint64_t *res);
643
644template
645Fault
646TimingSimpleCPU::write(uint32_t data, Addr addr,
647 unsigned flags, uint64_t *res);
648
649template
650Fault
651TimingSimpleCPU::write(uint16_t data, Addr addr,
652 unsigned flags, uint64_t *res);
653
654template
655Fault
656TimingSimpleCPU::write(uint8_t data, Addr addr,
657 unsigned flags, uint64_t *res);
658
659#endif //DOXYGEN_SHOULD_SKIP_THIS
660
661template<>
662Fault
663TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
664{
665 return write(*(uint64_t*)&data, addr, flags, res);
666}
667
668template<>
669Fault
670TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
671{
672 return write(*(uint32_t*)&data, addr, flags, res);
673}
674
675
676template<>
677Fault
678TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
679{
680 return write((uint32_t)data, addr, flags, res);
681}
682
683
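// Callback from the DTB once the translation (both halves, for a split
// access) has completed: either report the translation fault or issue the
// actual cache access.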
684void
685TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
686{
687 _status = Running;
688
689 if (state->getFault() != NoFault) {
690 if (state->isPrefetch()) {
691 state->setNoFault();
692 }
693 delete [] state->data;
694 state->deleteReqs();
695 translationFault(state->getFault());
696 } else {
697 if (!state->isSplit) {
698 sendData(state->mainReq, state->data, state->res,
699 state->mode == BaseTLB::Read);
700 } else {
701 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
702 state->data, state->mode == BaseTLB::Read);
703 }
704 }
705
706 delete state;
707}
708
709
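// Start fetching the next instruction: check for interrupts and PC events,
// then kick off a timing translation of the fetch address.  Microcode that
// comes from the microcode ROM or a current macroop needs no new fetch.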
710void
711TimingSimpleCPU::fetch()
712{
713 DPRINTF(SimpleCPU, "Fetch\n");
714
715 if (!curStaticInst || !curStaticInst->isDelayedCommit())
716 checkForInterrupts();
717
718 checkPcEventQueue();
719
720 // We must have just got suspended by a PC event
721 if (_status == Idle)
722 return;
723
724 TheISA::PCState pcState = thread->pcState();
725 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
726
727 if (needToFetch) {
728 _status = Running;
729 Request *ifetch_req = new Request();
730 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
731 setupFetchRequest(ifetch_req);
732 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
733 BaseTLB::Execute);
734 } else {
735 _status = IcacheWaitResponse;
736 completeIfetch(NULL);
737
738 numCycles += tickToCycles(curTick() - previousTick);
739 previousTick = curTick();
740 }
741}
742
743
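// Callback from the ITB once the fetch address has been translated: send
// the instruction read to the icache, or jump straight to the fault
// handler if the translation faulted.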
744void
745TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
746{
747 if (fault == NoFault) {
748 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
749 ifetch_pkt->dataStatic(&inst);
750
751 if (!icachePort.sendTiming(ifetch_pkt)) {
752 // Need to wait for retry
753 _status = IcacheRetry;
754 } else {
755 // Need to wait for cache to respond
756 _status = IcacheWaitResponse;
757 // ownership of packet transferred to memory system
758 ifetch_pkt = NULL;
759 }
760 } else {
761 delete req;
762 // fetch fault: advance directly to next instruction (fault handler)
763 _status = Running;
764 advanceInst(fault);
765 }
766
767 numCycles += tickToCycles(curTick() - previousTick);
768 previousTick = curTick();
769}
770
771
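// Move on to the next instruction; if this instruction faulted, enter the
// Faulting state and schedule a fetch of the fault handler instead.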
772void
773TimingSimpleCPU::advanceInst(Fault fault)
774{
775
776 if (_status == Faulting)
777 return;
778
779 if (fault != NoFault) {
780 advancePC(fault);
781 DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
782 reschedule(fetchEvent, nextCycle(), true);
783 _status = Faulting;
784 return;
785 }
786
787
788 if (!stayAtPC)
789 advancePC(fault);
790
791 if (_status == Running) {
792 // kick off fetch of next instruction... callback from icache
793 // response will cause that instruction to be executed,
794 // keeping the CPU running.
795 fetch();
796 }
797}
798
799
800void
801TimingSimpleCPU::completeIfetch(PacketPtr pkt)
802{
803 // received a response from the icache: execute the received
804 // instruction
805
806 assert(!pkt || !pkt->isError());
807 assert(_status == IcacheWaitResponse);
808
809 _status = Running;
810
811 numCycles += tickToCycles(curTick() - previousTick);
812 previousTick = curTick();
813
814 if (getState() == SimObject::Draining) {
815 if (pkt) {
816 delete pkt->req;
817 delete pkt;
818 }
819
820 completeDrain();
821 return;
822 }
823
824 preExecute();
825 if (curStaticInst && curStaticInst->isMemRef()) {
826 // load or store: just send to dcache
827 Fault fault = curStaticInst->initiateAcc(this, traceData);
828
829 // If we're not running now the instruction will complete in a dcache
830 // response callback or the instruction faulted and has started an
831 // ifetch
832 if (_status == Running) {
833 if (fault != NoFault && traceData) {
834 // If there was a fault, we shouldn't trace this instruction.
835 delete traceData;
836 traceData = NULL;
837 }
838
839 postExecute();
840 // @todo remove me after debugging with legion done
841 if (curStaticInst && (!curStaticInst->isMicroop() ||
842 curStaticInst->isFirstMicroop()))
843 instCnt++;
844 advanceInst(fault);
845 }
846 } else if (curStaticInst) {
847 // non-memory instruction: execute completely now
848 Fault fault = curStaticInst->execute(this, traceData);
849
850 // keep an instruction count
851 if (fault == NoFault)
852 countInst();
853 else if (traceData && !DTRACE(ExecFaulting)) {
854 delete traceData;
855 traceData = NULL;
856 }
857
858 postExecute();
859 // @todo remove me after debugging with legion done
860 if (curStaticInst && (!curStaticInst->isMicroop() ||
861 curStaticInst->isFirstMicroop()))
862 instCnt++;
863 advanceInst(fault);
864 } else {
865 advanceInst(NoFault);
866 }
867
868 if (pkt) {
869 delete pkt->req;
870 delete pkt;
871 }
872}
873
874void
875TimingSimpleCPU::IcachePort::ITickEvent::process()
876{
877 cpu->completeIfetch(pkt);
878}
879
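// Timing response (or nack) from the icache; real responses are deferred
// to the next CPU clock edge before completeIfetch() runs.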
880bool
881TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
882{
883 if (pkt->isResponse() && !pkt->wasNacked()) {
884 // delay processing of returned data until next CPU clock edge
885 Tick next_tick = cpu->nextCycle(curTick());
886
887 if (next_tick == curTick())
888 cpu->completeIfetch(pkt);
889 else
890 tickEvent.schedule(pkt, next_tick);
891
892 return true;
893 } else if (pkt->wasNacked()) {
894 assert(cpu->_status == IcacheWaitResponse);
895 pkt->reinitNacked();
896 if (!sendTiming(pkt)) {
897 cpu->_status = IcacheRetry;
898 cpu->ifetch_pkt = pkt;
899 }
900 }
901 //Snooping a Coherence Request, do nothing
902 return true;
903}
904
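// The icache can accept a packet again: resend the stalled fetch packet.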
905void
906TimingSimpleCPU::IcachePort::recvRetry()
907{
908 // we shouldn't get a retry unless we have a packet that we're
909 // waiting to transmit
910 assert(cpu->ifetch_pkt != NULL);
911 assert(cpu->_status == IcacheRetry);
912 PacketPtr tmp = cpu->ifetch_pkt;
913 if (sendTiming(tmp)) {
914 cpu->_status = IcacheWaitResponse;
915 cpu->ifetch_pkt = NULL;
916 }
917}
918
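// Response from the dcache: for split accesses, wait until both fragments
// have returned, then let the instruction's completeAcc() finish the load
// or store and advance to the next instruction.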
919void
920TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
921{
922 // received a response from the dcache: complete the load or store
923 // instruction
924 assert(!pkt->isError());
925 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
926 pkt->req->getFlags().isSet(Request::NO_ACCESS));
927
928 numCycles += tickToCycles(curTick() - previousTick);
929 previousTick = curTick();
930
931 if (pkt->senderState) {
932 SplitFragmentSenderState * send_state =
933 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
934 assert(send_state);
935 delete pkt->req;
936 delete pkt;
937 PacketPtr big_pkt = send_state->bigPkt;
938 delete send_state;
939
940 SplitMainSenderState * main_send_state =
941 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
942 assert(main_send_state);
943 // Record the fact that this packet is no longer outstanding.
944 assert(main_send_state->outstanding != 0);
945 main_send_state->outstanding--;
946
947 if (main_send_state->outstanding) {
948 return;
949 } else {
950 delete main_send_state;
951 big_pkt->senderState = NULL;
952 pkt = big_pkt;
953 }
954 }
955
956 _status = Running;
957
958 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
959
960 // keep an instruction count
961 if (fault == NoFault)
962 countInst();
963 else if (traceData) {
964 // If there was a fault, we shouldn't trace this instruction.
965 delete traceData;
966 traceData = NULL;
967 }
968
969 // the locked flag may be cleared on the response packet, so check
970 // pkt->req and not pkt to see if it was a load-locked
971 if (pkt->isRead() && pkt->req->isLLSC()) {
972 TheISA::handleLockedRead(thread, pkt->req);
973 }
974
975 delete pkt->req;
976 delete pkt;
977
978 postExecute();
979
980 if (getState() == SimObject::Draining) {
981 advancePC(fault);
982 completeDrain();
983
984 return;
985 }
986
987 advanceInst(fault);
988}
989
990
991void
992TimingSimpleCPU::completeDrain()
993{
994 DPRINTF(Config, "Done draining\n");
995 changeState(SimObject::Drained);
996 drainEvent->process();
997}
998
999void
1000TimingSimpleCPU::DcachePort::setPeer(Port *port)
1001{
1002 Port::setPeer(port);
1003
1004#if FULL_SYSTEM
1005 // Update the ThreadContext's memory ports (Functional/Virtual
1006 // Ports)
1007 cpu->tcBase()->connectMemPorts(cpu->tcBase());
1008#endif
1009}
1010
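// Timing response (or nack/snoop) from the dcache; real responses are
// deferred to the next CPU clock edge before completeDataAccess() runs.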
1011bool
1012TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
1013{
1014 if (pkt->isResponse() && !pkt->wasNacked()) {
1015 // delay processing of returned data until next CPU clock edge
1016 Tick next_tick = cpu->nextCycle(curTick());
1017
1018 if (next_tick == curTick()) {
1019 cpu->completeDataAccess(pkt);
1020 } else {
1021 if (!tickEvent.scheduled()) {
1022 tickEvent.schedule(pkt, next_tick);
1023 } else {
1024 // In the case of a split transaction and a cache that is
1025 // faster than a CPU we could get two responses before
1026 // next_tick expires
1027 if (!retryEvent.scheduled())
1028 schedule(retryEvent, next_tick);
1029 return false;
1030 }
1031 }
1032
1033 return true;
1034 }
1035 else if (pkt->wasNacked()) {
1036 assert(cpu->_status == DcacheWaitResponse);
1037 pkt->reinitNacked();
1038 if (!sendTiming(pkt)) {
1039 cpu->_status = DcacheRetry;
1040 cpu->dcache_pkt = pkt;
1041 }
1042 }
1043 //Snooping a Coherence Request, do nothing
1044 return true;
1045}
1046
1047void
1048TimingSimpleCPU::DcachePort::DTickEvent::process()
1049{
1050 cpu->completeDataAccess(pkt);
1051}
1052
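// The dcache can accept a packet again: resend the stalled packet, and
// for a split access also try to send the remaining fragment.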
1053void
1054TimingSimpleCPU::DcachePort::recvRetry()
1055{
1056 // we shouldn't get a retry unless we have a packet that we're
1057 // waiting to transmit
1058 assert(cpu->dcache_pkt != NULL);
1059 assert(cpu->_status == DcacheRetry);
1060 PacketPtr tmp = cpu->dcache_pkt;
1061 if (tmp->senderState) {
1062 // This is a packet from a split access.
1063 SplitFragmentSenderState * send_state =
1064 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1065 assert(send_state);
1066 PacketPtr big_pkt = send_state->bigPkt;
1067
1068 SplitMainSenderState * main_send_state =
1069 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1070 assert(main_send_state);
1071
1072 if (sendTiming(tmp)) {
1073 // If we were able to send without retrying, record that fact
1074 // and try sending the other fragment.
1075 send_state->clearFromParent();
1076 int other_index = main_send_state->getPendingFragment();
1077 if (other_index > 0) {
1078 tmp = main_send_state->fragments[other_index];
1079 cpu->dcache_pkt = tmp;
1080 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1081 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1082 main_send_state->fragments[other_index] = NULL;
1083 }
1084 } else {
1085 cpu->_status = DcacheWaitResponse;
1086 // memory system takes ownership of packet
1087 cpu->dcache_pkt = NULL;
1088 }
1089 }
1090 } else if (sendTiming(tmp)) {
1091 cpu->_status = DcacheWaitResponse;
1092 // memory system takes ownership of packet
1093 cpu->dcache_pkt = NULL;
1094 }
1095}
1096
1097TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
1098 Tick t)
1099 : pkt(_pkt), cpu(_cpu)
1100{
1101 cpu->schedule(this, t);
1102}
1103
1104void
1105TimingSimpleCPU::IprEvent::process()
1106{
1107 cpu->completeDataAccess(pkt);
1108}
1109
1110const char *
1111TimingSimpleCPU::IprEvent::description() const
1112{
1113 return "Timing Simple CPU Delay IPR event";
1114}
1115
1116
1117void
1118TimingSimpleCPU::printAddr(Addr a)
1119{
1120 dcachePort.printAddr(a);
1121}
1122
1123
1124////////////////////////////////////////////////////////////////////////
1125//
1126// TimingSimpleCPU Simulation Object
1127//
1128TimingSimpleCPU *
1129TimingSimpleCPUParams::create()
1130{
1131 numThreads = 1;
1132#if !FULL_SYSTEM
1133 if (workload.size() != 1)
1134 panic("only one workload allowed");
1135#endif
1136 return new TimingSimpleCPU(this);
1137}