timing.cc (6023:47b4fcb10c11) timing.cc (6043:19852407f5c9)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
108 dcachePort(this, p->clock), fetchEvent(this)
109{
110 _status = Idle;
111
112 icachePort.snoopRangeSent = false;
113 dcachePort.snoopRangeSent = false;
114
115 ifetch_pkt = dcache_pkt = NULL;
116 drainEvent = NULL;
117 previousTick = 0;
118 changeState(SimObject::Running);
119}
120
121
122TimingSimpleCPU::~TimingSimpleCPU()
123{
124}
125
126void
127TimingSimpleCPU::serialize(ostream &os)
128{
129 SimObject::State so_state = SimObject::getState();
130 SERIALIZE_ENUM(so_state);
131 BaseSimpleCPU::serialize(os);
132}
133
134void
135TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
136{
137 SimObject::State so_state;
138 UNSERIALIZE_ENUM(so_state);
139 BaseSimpleCPU::unserialize(cp, section);
140}
141
142unsigned int
143TimingSimpleCPU::drain(Event *drain_event)
144{
145 // TimingSimpleCPU is ready to drain if it's not waiting for
146 // an access to complete.
147 if (_status == Idle || _status == Running || _status == SwitchedOut) {
148 changeState(SimObject::Drained);
149 return 0;
150 } else {
151 changeState(SimObject::Draining);
152 drainEvent = drain_event;
153 return 1;
154 }
155}
156
157void
158TimingSimpleCPU::resume()
159{
160 DPRINTF(SimpleCPU, "Resume\n");
161 if (_status != SwitchedOut && _status != Idle) {
162 assert(system->getMemoryMode() == Enums::timing);
163
164 if (fetchEvent.scheduled())
165 deschedule(fetchEvent);
166
167 schedule(fetchEvent, nextCycle());
168 }
169
170 changeState(SimObject::Running);
171}
172
173void
174TimingSimpleCPU::switchOut()
175{
176 assert(_status == Running || _status == Idle);
177 _status = SwitchedOut;
178 numCycles += tickToCycles(curTick - previousTick);
179
180 // If we've been scheduled to resume but are then told to switch out,
181 // we'll need to cancel it.
182 if (fetchEvent.scheduled())
183 deschedule(fetchEvent);
184}
185
186
187void
188TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
189{
190 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
191
192 // if any of this CPU's ThreadContexts are active, mark the CPU as
193 // running and schedule its tick event.
194 for (int i = 0; i < threadContexts.size(); ++i) {
195 ThreadContext *tc = threadContexts[i];
196 if (tc->status() == ThreadContext::Active && _status != Running) {
197 _status = Running;
198 break;
199 }
200 }
201
202 if (_status != Running) {
203 _status = Idle;
204 }
205 assert(threadContexts.size() == 1);
206 previousTick = curTick;
207}
208
209
210void
211TimingSimpleCPU::activateContext(int thread_num, int delay)
212{
213 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
214
215 assert(thread_num == 0);
216 assert(thread);
217
218 assert(_status == Idle);
219
220 notIdleFraction++;
221 _status = Running;
222
223 // kick things off by initiating the fetch of the next instruction
224 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
225}
226
227
228void
229TimingSimpleCPU::suspendContext(int thread_num)
230{
231 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
232
233 assert(thread_num == 0);
234 assert(thread);
235
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
108 dcachePort(this, p->clock), fetchEvent(this)
109{
110 _status = Idle;
111
112 icachePort.snoopRangeSent = false;
113 dcachePort.snoopRangeSent = false;
114
115 ifetch_pkt = dcache_pkt = NULL;
116 drainEvent = NULL;
117 previousTick = 0;
118 changeState(SimObject::Running);
119}
120
121
122TimingSimpleCPU::~TimingSimpleCPU()
123{
124}
125
126void
127TimingSimpleCPU::serialize(ostream &os)
128{
129 SimObject::State so_state = SimObject::getState();
130 SERIALIZE_ENUM(so_state);
131 BaseSimpleCPU::serialize(os);
132}
133
134void
135TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
136{
137 SimObject::State so_state;
138 UNSERIALIZE_ENUM(so_state);
139 BaseSimpleCPU::unserialize(cp, section);
140}
141
142unsigned int
143TimingSimpleCPU::drain(Event *drain_event)
144{
145 // TimingSimpleCPU is ready to drain if it's not waiting for
146 // an access to complete.
147 if (_status == Idle || _status == Running || _status == SwitchedOut) {
148 changeState(SimObject::Drained);
149 return 0;
150 } else {
151 changeState(SimObject::Draining);
152 drainEvent = drain_event;
153 return 1;
154 }
155}
156
157void
158TimingSimpleCPU::resume()
159{
160 DPRINTF(SimpleCPU, "Resume\n");
161 if (_status != SwitchedOut && _status != Idle) {
162 assert(system->getMemoryMode() == Enums::timing);
163
164 if (fetchEvent.scheduled())
165 deschedule(fetchEvent);
166
167 schedule(fetchEvent, nextCycle());
168 }
169
170 changeState(SimObject::Running);
171}
172
173void
174TimingSimpleCPU::switchOut()
175{
176 assert(_status == Running || _status == Idle);
177 _status = SwitchedOut;
178 numCycles += tickToCycles(curTick - previousTick);
179
180 // If we've been scheduled to resume but are then told to switch out,
181 // we'll need to cancel it.
182 if (fetchEvent.scheduled())
183 deschedule(fetchEvent);
184}
185
186
187void
188TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
189{
190 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
191
192 // if any of this CPU's ThreadContexts are active, mark the CPU as
193 // running and schedule its tick event.
194 for (int i = 0; i < threadContexts.size(); ++i) {
195 ThreadContext *tc = threadContexts[i];
196 if (tc->status() == ThreadContext::Active && _status != Running) {
197 _status = Running;
198 break;
199 }
200 }
201
202 if (_status != Running) {
203 _status = Idle;
204 }
205 assert(threadContexts.size() == 1);
206 previousTick = curTick;
207}
208
209
210void
211TimingSimpleCPU::activateContext(int thread_num, int delay)
212{
213 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
214
215 assert(thread_num == 0);
216 assert(thread);
217
218 assert(_status == Idle);
219
220 notIdleFraction++;
221 _status = Running;
222
223 // kick things off by initiating the fetch of the next instruction
224 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
225}
226
227
228void
229TimingSimpleCPU::suspendContext(int thread_num)
230{
231 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
232
233 assert(thread_num == 0);
234 assert(thread);
235
236 if (_status == Idle)
237 return;
238
236 assert(_status == Running);
237
238 // just change status to Idle... if status != Running,
239 // completeInst() will not initiate fetch of next instruction.
240
241 notIdleFraction--;
242 _status = Idle;
243}
244
245bool
246TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
247{
248 RequestPtr req = pkt->req;
249 if (req->isMmapedIpr()) {
250 Tick delay;
251 delay = TheISA::handleIprRead(thread->getTC(), pkt);
252 new IprEvent(pkt, this, nextCycle(curTick + delay));
253 _status = DcacheWaitResponse;
254 dcache_pkt = NULL;
255 } else if (!dcachePort.sendTiming(pkt)) {
256 _status = DcacheRetry;
257 dcache_pkt = pkt;
258 } else {
259 _status = DcacheWaitResponse;
260 // memory system takes ownership of packet
261 dcache_pkt = NULL;
262 }
263 return dcache_pkt == NULL;
264}
265
266void
267TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
268 uint8_t *data, uint64_t *res, bool read)
269{
270 _status = Running;
271 if (fault != NoFault) {
272 delete data;
273 delete req;
274
275 translationFault(fault);
276 return;
277 }
278 PacketPtr pkt;
279 buildPacket(pkt, req, read);
280 pkt->dataDynamic<uint8_t>(data);
281 if (req->getFlags().isSet(Request::NO_ACCESS)) {
282 assert(!dcache_pkt);
283 pkt->makeResponse();
284 completeDataAccess(pkt);
285 } else if (read) {
286 handleReadPacket(pkt);
287 } else {
288 bool do_access = true; // flag to suppress cache access
289
290 if (req->isLocked()) {
291 do_access = TheISA::handleLockedWrite(thread, req);
292 } else if (req->isCondSwap()) {
293 assert(res);
294 req->setExtraData(*res);
295 }
296
297 if (do_access) {
298 dcache_pkt = pkt;
299 handleWritePacket();
300 } else {
301 _status = DcacheWaitResponse;
302 completeDataAccess(pkt);
303 }
304 }
305}
306
307void
308TimingSimpleCPU::sendSplitData(Fault fault1, Fault fault2,
309 RequestPtr req1, RequestPtr req2, RequestPtr req,
310 uint8_t *data, bool read)
311{
312 _status = Running;
313 if (fault1 != NoFault || fault2 != NoFault) {
314 delete data;
315 delete req1;
316 delete req2;
317 if (fault1 != NoFault)
318 translationFault(fault1);
319 else if (fault2 != NoFault)
320 translationFault(fault2);
321 return;
322 }
323 PacketPtr pkt1, pkt2;
324 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
325 if (req->getFlags().isSet(Request::NO_ACCESS)) {
326 assert(!dcache_pkt);
327 pkt1->makeResponse();
328 completeDataAccess(pkt1);
329 } else if (read) {
330 if (handleReadPacket(pkt1)) {
331 SplitFragmentSenderState * send_state =
332 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
333 send_state->clearFromParent();
334 if (handleReadPacket(pkt2)) {
335 send_state = dynamic_cast<SplitFragmentSenderState *>(
336 pkt1->senderState);
337 send_state->clearFromParent();
338 }
339 }
340 } else {
341 dcache_pkt = pkt1;
342 if (handleWritePacket()) {
343 SplitFragmentSenderState * send_state =
344 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
345 send_state->clearFromParent();
346 dcache_pkt = pkt2;
347 if (handleWritePacket()) {
348 send_state = dynamic_cast<SplitFragmentSenderState *>(
349 pkt1->senderState);
350 send_state->clearFromParent();
351 }
352 }
353 }
354}
355
356void
357TimingSimpleCPU::translationFault(Fault fault)
358{
359 numCycles += tickToCycles(curTick - previousTick);
360 previousTick = curTick;
361
362 if (traceData) {
363 // Since there was a fault, we shouldn't trace this instruction.
364 delete traceData;
365 traceData = NULL;
366 }
367
368 postExecute();
369
370 if (getState() == SimObject::Draining) {
371 advancePC(fault);
372 completeDrain();
373 } else {
374 advanceInst(fault);
375 }
376}
377
378void
379TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
380{
381 MemCmd cmd;
382 if (read) {
383 cmd = MemCmd::ReadReq;
384 if (req->isLocked())
385 cmd = MemCmd::LoadLockedReq;
386 } else {
387 cmd = MemCmd::WriteReq;
388 if (req->isLocked()) {
389 cmd = MemCmd::StoreCondReq;
390 } else if (req->isSwap()) {
391 cmd = MemCmd::SwapReq;
392 }
393 }
394 pkt = new Packet(req, cmd, Packet::Broadcast);
395}
396
397void
398TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
399 RequestPtr req1, RequestPtr req2, RequestPtr req,
400 uint8_t *data, bool read)
401{
402 pkt1 = pkt2 = NULL;
403
404 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
405
406 if (req->getFlags().isSet(Request::NO_ACCESS)) {
407 buildPacket(pkt1, req, read);
408 return;
409 }
410
411 buildPacket(pkt1, req1, read);
412 buildPacket(pkt2, req2, read);
413
414 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
415 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
416 Packet::Broadcast);
417
418 pkt->dataDynamic<uint8_t>(data);
419 pkt1->dataStatic<uint8_t>(data);
420 pkt2->dataStatic<uint8_t>(data + req1->getSize());
421
422 SplitMainSenderState * main_send_state = new SplitMainSenderState;
423 pkt->senderState = main_send_state;
424 main_send_state->fragments[0] = pkt1;
425 main_send_state->fragments[1] = pkt2;
426 main_send_state->outstanding = 2;
427 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
428 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
429}
430
431template <class T>
432Fault
433TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
434{
435 Fault fault;
436 const int asid = 0;
437 const int thread_id = 0;
438 const Addr pc = thread->readPC();
439 int block_size = dcachePort.peerBlockSize();
440 int data_size = sizeof(T);
441
442 RequestPtr req = new Request(asid, addr, data_size,
443 flags, pc, _cpuId, thread_id);
444
445 Addr split_addr = roundDown(addr + data_size - 1, block_size);
446 assert(split_addr <= addr || split_addr - addr < block_size);
447
448
449 _status = DTBWaitResponse;
450 if (split_addr > addr) {
451 RequestPtr req1, req2;
452 assert(!req->isLocked() && !req->isSwap());
453 req->splitOnVaddr(split_addr, req1, req2);
454
455 typedef SplitDataTranslation::WholeTranslationState WholeState;
456 WholeState *state = new WholeState(req1, req2, req,
457 (uint8_t *)(new T), BaseTLB::Read);
458 thread->dtb->translateTiming(req1, tc,
459 new SplitDataTranslation(this, 0, state), BaseTLB::Read);
460 thread->dtb->translateTiming(req2, tc,
461 new SplitDataTranslation(this, 1, state), BaseTLB::Read);
462 } else {
463 DataTranslation *translation =
464 new DataTranslation(this, (uint8_t *)(new T), NULL, BaseTLB::Read);
465 thread->dtb->translateTiming(req, tc, translation, BaseTLB::Read);
466 }
467
468 if (traceData) {
469 traceData->setData(data);
470 traceData->setAddr(addr);
471 }
472
473 // This will need a new way to tell if it has a dcache attached.
474 if (req->isUncacheable())
475 recordEvent("Uncached Read");
476
477 return NoFault;
478}
479
480#ifndef DOXYGEN_SHOULD_SKIP_THIS
481
482template
483Fault
484TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
485
486template
487Fault
488TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
489
490template
491Fault
492TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
493
494template
495Fault
496TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
497
498template
499Fault
500TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
501
502template
503Fault
504TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
505
506#endif //DOXYGEN_SHOULD_SKIP_THIS
507
508template<>
509Fault
510TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
511{
512 return read(addr, *(uint64_t*)&data, flags);
513}
514
515template<>
516Fault
517TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
518{
519 return read(addr, *(uint32_t*)&data, flags);
520}
521
522
523template<>
524Fault
525TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
526{
527 return read(addr, (uint32_t&)data, flags);
528}
529
530bool
531TimingSimpleCPU::handleWritePacket()
532{
533 RequestPtr req = dcache_pkt->req;
534 if (req->isMmapedIpr()) {
535 Tick delay;
536 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
537 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
538 _status = DcacheWaitResponse;
539 dcache_pkt = NULL;
540 } else if (!dcachePort.sendTiming(dcache_pkt)) {
541 _status = DcacheRetry;
542 } else {
543 _status = DcacheWaitResponse;
544 // memory system takes ownership of packet
545 dcache_pkt = NULL;
546 }
547 return dcache_pkt == NULL;
548}
549
550template <class T>
551Fault
552TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
553{
554 const int asid = 0;
555 const int thread_id = 0;
556 const Addr pc = thread->readPC();
557 int block_size = dcachePort.peerBlockSize();
558 int data_size = sizeof(T);
559
560 RequestPtr req = new Request(asid, addr, data_size,
561 flags, pc, _cpuId, thread_id);
562
563 Addr split_addr = roundDown(addr + data_size - 1, block_size);
564 assert(split_addr <= addr || split_addr - addr < block_size);
565
566 T *dataP = new T;
567 *dataP = TheISA::htog(data);
568 _status = DTBWaitResponse;
569 if (split_addr > addr) {
570 RequestPtr req1, req2;
571 assert(!req->isLocked() && !req->isSwap());
572 req->splitOnVaddr(split_addr, req1, req2);
573
574 typedef SplitDataTranslation::WholeTranslationState WholeState;
575 WholeState *state = new WholeState(req1, req2, req,
576 (uint8_t *)dataP, BaseTLB::Write);
577 thread->dtb->translateTiming(req1, tc,
578 new SplitDataTranslation(this, 0, state), BaseTLB::Write);
579 thread->dtb->translateTiming(req2, tc,
580 new SplitDataTranslation(this, 1, state), BaseTLB::Write);
581 } else {
582 DataTranslation *translation =
583 new DataTranslation(this, (uint8_t *)dataP, res, BaseTLB::Write);
584 thread->dtb->translateTiming(req, tc, translation, BaseTLB::Write);
585 }
586
587 if (traceData) {
588 traceData->setAddr(req->getVaddr());
589 traceData->setData(data);
590 }
591
592 // This will need a new way to tell if it's hooked up to a cache or not.
593 if (req->isUncacheable())
594 recordEvent("Uncached Write");
595
596 // If the write needs to have a fault on the access, consider calling
597 // changeStatus() and changing it to "bad addr write" or something.
598 return NoFault;
599}
600
601
602#ifndef DOXYGEN_SHOULD_SKIP_THIS
603template
604Fault
605TimingSimpleCPU::write(Twin32_t data, Addr addr,
606 unsigned flags, uint64_t *res);
607
608template
609Fault
610TimingSimpleCPU::write(Twin64_t data, Addr addr,
611 unsigned flags, uint64_t *res);
612
613template
614Fault
615TimingSimpleCPU::write(uint64_t data, Addr addr,
616 unsigned flags, uint64_t *res);
617
618template
619Fault
620TimingSimpleCPU::write(uint32_t data, Addr addr,
621 unsigned flags, uint64_t *res);
622
623template
624Fault
625TimingSimpleCPU::write(uint16_t data, Addr addr,
626 unsigned flags, uint64_t *res);
627
628template
629Fault
630TimingSimpleCPU::write(uint8_t data, Addr addr,
631 unsigned flags, uint64_t *res);
632
633#endif //DOXYGEN_SHOULD_SKIP_THIS
634
635template<>
636Fault
637TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
638{
639 return write(*(uint64_t*)&data, addr, flags, res);
640}
641
642template<>
643Fault
644TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
645{
646 return write(*(uint32_t*)&data, addr, flags, res);
647}
648
649
650template<>
651Fault
652TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
653{
654 return write((uint32_t)data, addr, flags, res);
655}
656
657
658void
659TimingSimpleCPU::fetch()
660{
661 DPRINTF(SimpleCPU, "Fetch\n");
662
663 if (!curStaticInst || !curStaticInst->isDelayedCommit())
664 checkForInterrupts();
665
666 checkPcEventQueue();
667
668 bool fromRom = isRomMicroPC(thread->readMicroPC());
669
670 if (!fromRom && !curMacroStaticInst) {
671 Request *ifetch_req = new Request();
672 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
673 setupFetchRequest(ifetch_req);
674 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
675 BaseTLB::Execute);
676 } else {
677 _status = IcacheWaitResponse;
678 completeIfetch(NULL);
679
680 numCycles += tickToCycles(curTick - previousTick);
681 previousTick = curTick;
682 }
683}
684
685
686void
687TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
688{
689 if (fault == NoFault) {
690 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
691 ifetch_pkt->dataStatic(&inst);
692
693 if (!icachePort.sendTiming(ifetch_pkt)) {
694 // Need to wait for retry
695 _status = IcacheRetry;
696 } else {
697 // Need to wait for cache to respond
698 _status = IcacheWaitResponse;
699 // ownership of packet transferred to memory system
700 ifetch_pkt = NULL;
701 }
702 } else {
703 delete req;
704 // fetch fault: advance directly to next instruction (fault handler)
705 advanceInst(fault);
706 }
707
708 numCycles += tickToCycles(curTick - previousTick);
709 previousTick = curTick;
710}
711
712
713void
714TimingSimpleCPU::advanceInst(Fault fault)
715{
716 if (fault != NoFault || !stayAtPC)
717 advancePC(fault);
718
719 if (_status == Running) {
720 // kick off fetch of next instruction... callback from icache
721 // response will cause that instruction to be executed,
722 // keeping the CPU running.
723 fetch();
724 }
725}
726
727
728void
729TimingSimpleCPU::completeIfetch(PacketPtr pkt)
730{
731 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
732
733 // received a response from the icache: execute the received
734 // instruction
735
736 assert(!pkt || !pkt->isError());
737 assert(_status == IcacheWaitResponse);
738
739 _status = Running;
740
741 numCycles += tickToCycles(curTick - previousTick);
742 previousTick = curTick;
743
744 if (getState() == SimObject::Draining) {
745 if (pkt) {
746 delete pkt->req;
747 delete pkt;
748 }
749
750 completeDrain();
751 return;
752 }
753
754 preExecute();
755 if (curStaticInst &&
756 curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
757 // load or store: just send to dcache
758 Fault fault = curStaticInst->initiateAcc(this, traceData);
759 if (_status != Running) {
760 // instruction will complete in dcache response callback
761 assert(_status == DcacheWaitResponse ||
762 _status == DcacheRetry || DTBWaitResponse);
763 assert(fault == NoFault);
764 } else {
765 if (fault != NoFault && traceData) {
766 // If there was a fault, we shouldn't trace this instruction.
767 delete traceData;
768 traceData = NULL;
769 }
770
771 postExecute();
772 // @todo remove me after debugging with legion done
773 if (curStaticInst && (!curStaticInst->isMicroop() ||
774 curStaticInst->isFirstMicroop()))
775 instCnt++;
776 advanceInst(fault);
777 }
778 } else if (curStaticInst) {
779 // non-memory instruction: execute completely now
780 Fault fault = curStaticInst->execute(this, traceData);
781
782 // keep an instruction count
783 if (fault == NoFault)
784 countInst();
785 else if (traceData) {
786 // If there was a fault, we shouldn't trace this instruction.
787 delete traceData;
788 traceData = NULL;
789 }
790
791 postExecute();
792 // @todo remove me after debugging with legion done
793 if (curStaticInst && (!curStaticInst->isMicroop() ||
794 curStaticInst->isFirstMicroop()))
795 instCnt++;
796 advanceInst(fault);
797 } else {
798 advanceInst(NoFault);
799 }
800
801 if (pkt) {
802 delete pkt->req;
803 delete pkt;
804 }
805}
806
807void
808TimingSimpleCPU::IcachePort::ITickEvent::process()
809{
810 cpu->completeIfetch(pkt);
811}
812
813bool
814TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
815{
816 if (pkt->isResponse() && !pkt->wasNacked()) {
817 // delay processing of returned data until next CPU clock edge
818 Tick next_tick = cpu->nextCycle(curTick);
819
820 if (next_tick == curTick)
821 cpu->completeIfetch(pkt);
822 else
823 tickEvent.schedule(pkt, next_tick);
824
825 return true;
826 }
827 else if (pkt->wasNacked()) {
828 assert(cpu->_status == IcacheWaitResponse);
829 pkt->reinitNacked();
830 if (!sendTiming(pkt)) {
831 cpu->_status = IcacheRetry;
832 cpu->ifetch_pkt = pkt;
833 }
834 }
835 //Snooping a Coherence Request, do nothing
836 return true;
837}
838
839void
840TimingSimpleCPU::IcachePort::recvRetry()
841{
842 // we shouldn't get a retry unless we have a packet that we're
843 // waiting to transmit
844 assert(cpu->ifetch_pkt != NULL);
845 assert(cpu->_status == IcacheRetry);
846 PacketPtr tmp = cpu->ifetch_pkt;
847 if (sendTiming(tmp)) {
848 cpu->_status = IcacheWaitResponse;
849 cpu->ifetch_pkt = NULL;
850 }
851}
852
853void
854TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
855{
856 // received a response from the dcache: complete the load or store
857 // instruction
858 assert(!pkt->isError());
859
860 numCycles += tickToCycles(curTick - previousTick);
861 previousTick = curTick;
862
863 if (pkt->senderState) {
864 SplitFragmentSenderState * send_state =
865 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
866 assert(send_state);
867 delete pkt->req;
868 delete pkt;
869 PacketPtr big_pkt = send_state->bigPkt;
870 delete send_state;
871
872 SplitMainSenderState * main_send_state =
873 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
874 assert(main_send_state);
875 // Record the fact that this packet is no longer outstanding.
876 assert(main_send_state->outstanding != 0);
877 main_send_state->outstanding--;
878
879 if (main_send_state->outstanding) {
880 return;
881 } else {
882 delete main_send_state;
883 big_pkt->senderState = NULL;
884 pkt = big_pkt;
885 }
886 }
887
888 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
889 _status = Running;
890
891 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
892
893 // keep an instruction count
894 if (fault == NoFault)
895 countInst();
896 else if (traceData) {
897 // If there was a fault, we shouldn't trace this instruction.
898 delete traceData;
899 traceData = NULL;
900 }
901
902 // the locked flag may be cleared on the response packet, so check
903 // pkt->req and not pkt to see if it was a load-locked
904 if (pkt->isRead() && pkt->req->isLocked()) {
905 TheISA::handleLockedRead(thread, pkt->req);
906 }
907
908 delete pkt->req;
909 delete pkt;
910
911 postExecute();
912
913 if (getState() == SimObject::Draining) {
914 advancePC(fault);
915 completeDrain();
916
917 return;
918 }
919
920 advanceInst(fault);
921}
922
923
924void
925TimingSimpleCPU::completeDrain()
926{
927 DPRINTF(Config, "Done draining\n");
928 changeState(SimObject::Drained);
929 drainEvent->process();
930}
931
932void
933TimingSimpleCPU::DcachePort::setPeer(Port *port)
934{
935 Port::setPeer(port);
936
937#if FULL_SYSTEM
938 // Update the ThreadContext's memory ports (Functional/Virtual
939 // Ports)
940 cpu->tcBase()->connectMemPorts(cpu->tcBase());
941#endif
942}
943
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    // Incoming packet from the data cache.  Responses are processed on
    // the next CPU clock edge; nacked packets are resent (or queued for
    // retry); anything else is treated as a snoop and ignored.
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            // Resend failed: stash the packet and wait for recvRetry().
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
970
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: complete the access on the clock edge.
    cpu->completeDataAccess(pkt);
}
976
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // The dcache can now accept the packet we previously failed to send.
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // The sibling fragment still needs to go out; issue it
                // through the normal read/write paths.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
1020
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Schedule completion of a memory-mapped IPR access at tick t.
    cpu->schedule(this, t);
}
1027
void
TimingSimpleCPU::IprEvent::process()
{
    // The modelled IPR latency has elapsed; finish the access.
    cpu->completeDataAccess(pkt);
}
1033
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name for tracing/debugging.
    return "Timing Simple CPU Delay IPR event";
}
1039
1040
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: delegate address printing to the dcache port.
    dcachePort.printAddr(a);
}
1046
1047
1048////////////////////////////////////////////////////////////////////////
1049//
1050// TimingSimpleCPU Simulation Object
1051//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // Config-script factory: this CPU model is single-threaded, and in
    // syscall-emulation builds requires exactly one workload.
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
239 assert(_status == Running);
240
241 // just change status to Idle... if status != Running,
242 // completeInst() will not initiate fetch of next instruction.
243
244 notIdleFraction--;
245 _status = Idle;
246}
247
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    // Issue a translated read packet to memory.  Returns true if the
    // packet was accepted (or handled internally as a memory-mapped IPR
    // access); false means we must wait for a dcache retry.
    RequestPtr req = pkt->req;
    if (req->isMmapedIpr()) {
        // Memory-mapped IPR: serviced internally after a fixed delay
        // rather than by the memory system.
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        // Port is busy; hold the packet until recvRetry().
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
268
void
TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
        uint8_t *data, uint64_t *res, bool read)
{
    // Translation callback for a non-split access: build the packet and
    // issue it to memory, or report the translation fault.
    _status = Running;
    if (fault != NoFault) {
        // NOTE(review): 'data' was allocated as a T and is deleted here
        // through uint8_t* — confirm this is benign for the types used.
        delete data;
        delete req;

        translationFault(fault);
        return;
    }
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No-access request: respond immediately without touching memory.
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            // Store-conditional: may fail locally without a memory access.
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            // Failed store-conditional: complete without going to memory.
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}
309
310void
311TimingSimpleCPU::sendSplitData(Fault fault1, Fault fault2,
312 RequestPtr req1, RequestPtr req2, RequestPtr req,
313 uint8_t *data, bool read)
314{
315 _status = Running;
316 if (fault1 != NoFault || fault2 != NoFault) {
317 delete data;
318 delete req1;
319 delete req2;
320 if (fault1 != NoFault)
321 translationFault(fault1);
322 else if (fault2 != NoFault)
323 translationFault(fault2);
324 return;
325 }
326 PacketPtr pkt1, pkt2;
327 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
328 if (req->getFlags().isSet(Request::NO_ACCESS)) {
329 assert(!dcache_pkt);
330 pkt1->makeResponse();
331 completeDataAccess(pkt1);
332 } else if (read) {
333 if (handleReadPacket(pkt1)) {
334 SplitFragmentSenderState * send_state =
335 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
336 send_state->clearFromParent();
337 if (handleReadPacket(pkt2)) {
338 send_state = dynamic_cast<SplitFragmentSenderState *>(
339 pkt1->senderState);
340 send_state->clearFromParent();
341 }
342 }
343 } else {
344 dcache_pkt = pkt1;
345 if (handleWritePacket()) {
346 SplitFragmentSenderState * send_state =
347 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
348 send_state->clearFromParent();
349 dcache_pkt = pkt2;
350 if (handleWritePacket()) {
351 send_state = dynamic_cast<SplitFragmentSenderState *>(
352 pkt1->senderState);
353 send_state->clearFromParent();
354 }
355 }
356 }
357}
358
void
TimingSimpleCPU::translationFault(Fault fault)
{
    // A TLB translation faulted: account for elapsed cycles, drop any
    // trace record for the faulting instruction, and advance (invoking
    // the fault) or finish draining.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}
380
381void
382TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
383{
384 MemCmd cmd;
385 if (read) {
386 cmd = MemCmd::ReadReq;
387 if (req->isLocked())
388 cmd = MemCmd::LoadLockedReq;
389 } else {
390 cmd = MemCmd::WriteReq;
391 if (req->isLocked()) {
392 cmd = MemCmd::StoreCondReq;
393 } else if (req->isSwap()) {
394 cmd = MemCmd::SwapReq;
395 }
396 }
397 pkt = new Packet(req, cmd, Packet::Broadcast);
398}
399
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    // Build the two fragment packets for a block-straddling access plus
    // a parent ("big") packet that owns the data buffer and tracks the
    // fragments via sender-state objects.
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access will happen: a single packet on the original
        // request suffices.
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    // Give the parent request the first half's physical address so the
    // parent packet is well-formed.
    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    // Parent owns 'data'; the fragments view slices of the same buffer.
    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}
433
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // Initiate a timing-mode load of sizeof(T) bytes at virtual address
    // addr.  Translation runs asynchronously; the loaded value arrives
    // later via completeDataAccess(), so 'data' is used here only for
    // tracing.  Always returns NoFault.
    Fault fault;  // note: never assigned; kept for interface symmetry
    const int asid = 0;
    const int thread_id = 0;
    const Addr pc = thread->readPC();
    int block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, thread_id);

    // Does the access cross a cache-block boundary?
    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);


    _status = DTBWaitResponse;
    if (split_addr > addr) {
        // Straddling access: split the request and translate both
        // halves independently.
        RequestPtr req1, req2;
        assert(!req->isLocked() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                                           (uint8_t *)(new T), BaseTLB::Read);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Read);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Read);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)(new T), NULL, BaseTLB::Read);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Read);
    }

    if (traceData) {
        traceData->setData(data);
        traceData->setAddr(addr);
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return NoFault;
}
482
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of the read() template for every access
// width the ISAs use (including 128-bit twin loads).
template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS
510
template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    // Forward double loads through the 64-bit integer overload.
    return read(addr, *(uint64_t*)&data, flags);
}
517
template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    // Forward float loads through the 32-bit integer overload.
    return read(addr, *(uint32_t*)&data, flags);
}
524
525
template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    // Signed 32-bit loads reuse the unsigned overload.
    return read(addr, (uint32_t&)data, flags);
}
532
bool
TimingSimpleCPU::handleWritePacket()
{
    // Issue the pending write packet (dcache_pkt) to memory.  Returns
    // true if the packet was accepted (or handled internally as a
    // memory-mapped IPR access); false means we must wait for a retry,
    // in which case dcache_pkt stays set.
    RequestPtr req = dcache_pkt->req;
    if (req->isMmapedIpr()) {
        // Memory-mapped IPR: serviced internally after a fixed delay.
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}
552
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // Initiate a timing-mode store of sizeof(T) bytes to virtual
    // address addr.  Translation runs asynchronously and the packet is
    // issued from the translation callback; always returns NoFault.
    const int asid = 0;
    const int thread_id = 0;
    const Addr pc = thread->readPC();
    int block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, thread_id);

    // Does the access cross a cache-block boundary?
    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    // Heap copy of the store data in guest byte order; ownership passes
    // to the translation state / packet.
    T *dataP = new T;
    *dataP = TheISA::htog(data);
    _status = DTBWaitResponse;
    if (split_addr > addr) {
        // Straddling access: split the request and translate both
        // halves independently.
        RequestPtr req1, req2;
        assert(!req->isLocked() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        typedef SplitDataTranslation::WholeTranslationState WholeState;
        WholeState *state = new WholeState(req1, req2, req,
                                           (uint8_t *)dataP, BaseTLB::Write);
        thread->dtb->translateTiming(req1, tc,
                new SplitDataTranslation(this, 0, state), BaseTLB::Write);
        thread->dtb->translateTiming(req2, tc,
                new SplitDataTranslation(this, 1, state), BaseTLB::Write);
    } else {
        DataTranslation *translation =
            new DataTranslation(this, (uint8_t *)dataP, res, BaseTLB::Write);
        thread->dtb->translateTiming(req, tc, translation, BaseTLB::Write);
    }

    if (traceData) {
        traceData->setAddr(req->getVaddr());
        traceData->setData(data);
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return NoFault;
}
603
604
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Explicit instantiations of the write() template for every access
// width the ISAs use (including 128-bit twin stores).
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS
637
template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    // Forward double stores through the 64-bit integer overload.
    return write(*(uint64_t*)&data, addr, flags, res);
}
644
template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    // Forward float stores through the 32-bit integer overload.
    return write(*(uint32_t*)&data, addr, flags, res);
}
651
652
template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    // Signed 32-bit stores reuse the unsigned overload.
    return write((uint32_t)data, addr, flags, res);
}
659
660
void
TimingSimpleCPU::fetch()
{
    // Start fetching the next instruction.  Microcode (ROM micro-PC or
    // an in-progress macro-op) needs no memory access and completes
    // immediately; otherwise kick off a timing ITB translation, which
    // calls sendFetch() when done.
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom && !curMacroStaticInst) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                BaseTLB::Execute);
    } else {
        // Microcode path: no icache access, pkt == NULL.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick - previousTick);
        previousTick = curTick;
    }
}
687
688
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    // ITB translation callback: on success, send the fetch packet to
    // the icache (handling a busy port); on a fetch fault, skip the
    // access and advance straight into the fault handler.
    if (fault == NoFault) {
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}
714
715
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Move to the next (micro-)instruction, invoking 'fault' if one
    // occurred, then start the next fetch if the CPU is still Running.
    // stayAtPC suppresses the PC advance (e.g. mid macro-op).
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}
729
730
731void
732TimingSimpleCPU::completeIfetch(PacketPtr pkt)
733{
734 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
735
736 // received a response from the icache: execute the received
737 // instruction
738
739 assert(!pkt || !pkt->isError());
740 assert(_status == IcacheWaitResponse);
741
742 _status = Running;
743
744 numCycles += tickToCycles(curTick - previousTick);
745 previousTick = curTick;
746
747 if (getState() == SimObject::Draining) {
748 if (pkt) {
749 delete pkt->req;
750 delete pkt;
751 }
752
753 completeDrain();
754 return;
755 }
756
757 preExecute();
758 if (curStaticInst &&
759 curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
760 // load or store: just send to dcache
761 Fault fault = curStaticInst->initiateAcc(this, traceData);
762 if (_status != Running) {
763 // instruction will complete in dcache response callback
764 assert(_status == DcacheWaitResponse ||
765 _status == DcacheRetry || DTBWaitResponse);
766 assert(fault == NoFault);
767 } else {
768 if (fault != NoFault && traceData) {
769 // If there was a fault, we shouldn't trace this instruction.
770 delete traceData;
771 traceData = NULL;
772 }
773
774 postExecute();
775 // @todo remove me after debugging with legion done
776 if (curStaticInst && (!curStaticInst->isMicroop() ||
777 curStaticInst->isFirstMicroop()))
778 instCnt++;
779 advanceInst(fault);
780 }
781 } else if (curStaticInst) {
782 // non-memory instruction: execute completely now
783 Fault fault = curStaticInst->execute(this, traceData);
784
785 // keep an instruction count
786 if (fault == NoFault)
787 countInst();
788 else if (traceData) {
789 // If there was a fault, we shouldn't trace this instruction.
790 delete traceData;
791 traceData = NULL;
792 }
793
794 postExecute();
795 // @todo remove me after debugging with legion done
796 if (curStaticInst && (!curStaticInst->isMicroop() ||
797 curStaticInst->isFirstMicroop()))
798 instCnt++;
799 advanceInst(fault);
800 } else {
801 advanceInst(NoFault);
802 }
803
804 if (pkt) {
805 delete pkt->req;
806 delete pkt;
807 }
808}
809
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: complete the fetch on the clock edge.
    cpu->completeIfetch(pkt);
}
815
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    // Incoming packet from the icache.  Responses are processed on the
    // next CPU clock edge; nacked packets are resent (or queued for
    // retry); anything else is treated as a snoop and ignored.
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            // Resend failed: stash the packet and wait for recvRetry().
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
841
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // The icache can now accept the fetch we previously failed to send.
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}
855
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        // This response is one fragment of a split access: free the
        // fragment and return early unless both halves are back, in
        // which case complete using the parent ("big") packet.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}
925
926
void
TimingSimpleCPU::completeDrain()
{
    // Finish a pending drain request: move to the Drained state and
    // wake whoever asked for the drain.
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}
934
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    // Standard port connection, plus (full-system only) a refresh of
    // the thread context's memory ports, which depend on this peer.
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}
946
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    // Incoming packet from the data cache.  Responses are processed on
    // the next CPU clock edge; nacked packets are resent (or queued for
    // retry); anything else is treated as a snoop and ignored.
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            // Resend failed: stash the packet and wait for recvRetry().
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
973
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: complete the access on the clock edge.
    cpu->completeDataAccess(pkt);
}
979
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // The dcache can now accept the packet we previously failed to send.
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // The sibling fragment still needs to go out; issue it
                // through the normal read/write paths.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
1023
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Schedule completion of a memory-mapped IPR access at tick t.
    cpu->schedule(this, t);
}
1030
void
TimingSimpleCPU::IprEvent::process()
{
    // The modelled IPR latency has elapsed; finish the access.
    cpu->completeDataAccess(pkt);
}
1036
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name for tracing/debugging.
    return "Timing Simple CPU Delay IPR event";
}
1042
1043
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: delegate address printing to the dcache port.
    dcachePort.printAddr(a);
}
1049
1050
1051////////////////////////////////////////////////////////////////////////
1052//
1053// TimingSimpleCPU Simulation Object
1054//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // Config-script factory: this CPU model is single-threaded, and in
    // syscall-emulation builds requires exactly one workload.
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}