timing.cc (6973:a123bd350935 → 7016:8b2b8e5e7d35)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
      dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

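// Issue a timing-mode read packet to the data port. Mmapped IPR reads are
// handled locally via an IprEvent; otherwise the packet goes to the dcache,
// with dcache_pkt held for a retry if the port is busy. Returns true once
// the CPU no longer owns the packet.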
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

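// Complete a non-split access once its translation has finished: build the
// packet, then either respond immediately (NO_ACCESS), issue the read, or
// do LL/SC and conditional-swap bookkeeping before issuing the write.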
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamic<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

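// As sendData(), but for an access that crosses a cache line boundary: the
// two fragment packets are issued separately and rejoined in
// completeDataAccess() once both responses have arrived.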
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        if (handleReadPacket(pkt1)) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            if (handleReadPacket(pkt2)) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        if (handleWritePacket()) {
            SplitFragmentSenderState * send_state =
                dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            if (handleWritePacket()) {
                send_state = dynamic_cast<SplitFragmentSenderState *>(
                        pkt2->senderState);
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

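// Build the two fragment packets for a split access, plus the "big" packet
// that owns the caller's data buffer; SenderState objects link each
// fragment back to the big packet so the pieces can be reassembled.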
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamic<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

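// Initiate a timing-mode load: allocate the request and start a (possibly
// split) data TLB translation. The actual access is issued from
// finishTranslation() once the translation completes.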
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);
    BaseTLB::Mode mode = BaseTLB::Read;

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, (uint8_t *)(new T),
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU> *trans1 =
            new DataTranslation<TimingSimpleCPU>(this, state, 0);
        DataTranslation<TimingSimpleCPU> *trans2 =
            new DataTranslation<TimingSimpleCPU>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, (uint8_t *)(new T), NULL, mode);
        DataTranslation<TimingSimpleCPU> *translation
            = new DataTranslation<TimingSimpleCPU>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    if (traceData) {
        traceData->setData(data);
        traceData->setAddr(addr);
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return NoFault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

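// Issue the pending write packet (dcache_pkt) to the data port, handling
// mmapped IPR writes locally and keeping the packet for a retry if the
// port is busy. Returns true once the CPU no longer owns the packet.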
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmapedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

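// Initiate a timing-mode store: copy the data into a guest-endian buffer
// and start a (possibly split) data TLB translation; the write itself is
// sent from finishTranslation().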
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->readPC();
    unsigned block_size = dcachePort.peerBlockSize();
    int data_size = sizeof(T);
    BaseTLB::Mode mode = BaseTLB::Write;

    RequestPtr req = new Request(asid, addr, data_size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + data_size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    T *dataP = new T;
    *dataP = TheISA::htog(data);
    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, (uint8_t *)dataP,
                                      res, mode);
        DataTranslation<TimingSimpleCPU> *trans1 =
            new DataTranslation<TimingSimpleCPU>(this, state, 0);
        DataTranslation<TimingSimpleCPU> *trans2 =
            new DataTranslation<TimingSimpleCPU>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, (uint8_t *)dataP, res, mode);
        DataTranslation<TimingSimpleCPU> *translation =
            new DataTranslation<TimingSimpleCPU>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    if (traceData) {
        traceData->setAddr(req->getVaddr());
        traceData->setData(data);
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return NoFault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


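// Called when a (possibly split) data translation finishes: on a fault,
// clean up the buffers/requests and report it; otherwise hand off to
// sendData() or sendSplitData() to perform the access.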
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


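// Start fetching the next instruction: begin an ITB translation for a new
// fetch, or skip straight to completeIfetch(NULL) when executing out of
// the microcode ROM or an in-progress macro-op.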
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom && !curMacroStaticInst) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                                     BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick - previousTick);
        previousTick = curTick;
    }
}


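// ITB callback: on a successful translation send the fetch to the icache
// (waiting for a port retry if needed); on a fault, advance directly to
// the fault handler.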
void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


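// Advance the PC past the completed instruction (unless microcode is
// holding it in place) and, if the CPU is still Running, kick off the next
// fetch.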
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
        curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry ||
                   _status == DTBWaitResponse);
            assert(fault == NoFault);
        } else {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

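// Response path from the icache: completed fetches are processed on the
// next CPU clock edge; nacked packets are resent immediately, falling back
// to the retry path if the port is busy. Snoops are ignored.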
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

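// The dcache can accept the packet we failed to send earlier. For a split
// access, resend the pending fragment and then try its sibling; otherwise
// just resend the single pending packet.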
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                    (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}