timing.cc (5891:73084c6bb183) timing.cc (5894:8091ac99341a)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this)
107 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
108 dcachePort(this, p->clock), fetchEvent(this)
108{
109 _status = Idle;
110
111 icachePort.snoopRangeSent = false;
112 dcachePort.snoopRangeSent = false;
113
114 ifetch_pkt = dcache_pkt = NULL;
115 drainEvent = NULL;
116 previousTick = 0;
117 changeState(SimObject::Running);
118}
119
120
121TimingSimpleCPU::~TimingSimpleCPU()
122{
123}
124
125void
126TimingSimpleCPU::serialize(ostream &os)
127{
128 SimObject::State so_state = SimObject::getState();
129 SERIALIZE_ENUM(so_state);
130 BaseSimpleCPU::serialize(os);
131}
132
133void
134TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
135{
136 SimObject::State so_state;
137 UNSERIALIZE_ENUM(so_state);
138 BaseSimpleCPU::unserialize(cp, section);
139}
140
141unsigned int
142TimingSimpleCPU::drain(Event *drain_event)
143{
144 // TimingSimpleCPU is ready to drain if it's not waiting for
145 // an access to complete.
146 if (_status == Idle || _status == Running || _status == SwitchedOut) {
147 changeState(SimObject::Drained);
148 return 0;
149 } else {
150 changeState(SimObject::Draining);
151 drainEvent = drain_event;
152 return 1;
153 }
154}
155
156void
157TimingSimpleCPU::resume()
158{
159 DPRINTF(SimpleCPU, "Resume\n");
160 if (_status != SwitchedOut && _status != Idle) {
161 assert(system->getMemoryMode() == Enums::timing);
162
163 if (fetchEvent.scheduled())
164 deschedule(fetchEvent);
165
166 schedule(fetchEvent, nextCycle());
167 }
168
169 changeState(SimObject::Running);
170}
171
172void
173TimingSimpleCPU::switchOut()
174{
175 assert(_status == Running || _status == Idle);
176 _status = SwitchedOut;
177 numCycles += tickToCycles(curTick - previousTick);
178
179 // If we've been scheduled to resume but are then told to switch out,
180 // we'll need to cancel it.
181 if (fetchEvent.scheduled())
182 deschedule(fetchEvent);
183}
184
185
186void
187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
188{
189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
190
191 // if any of this CPU's ThreadContexts are active, mark the CPU as
192 // running and schedule its tick event.
193 for (int i = 0; i < threadContexts.size(); ++i) {
194 ThreadContext *tc = threadContexts[i];
195 if (tc->status() == ThreadContext::Active && _status != Running) {
196 _status = Running;
197 break;
198 }
199 }
200
201 if (_status != Running) {
202 _status = Idle;
203 }
204 assert(threadContexts.size() == 1);
205 previousTick = curTick;
206}
207
208
209void
210TimingSimpleCPU::activateContext(int thread_num, int delay)
211{
212 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
213
214 assert(thread_num == 0);
215 assert(thread);
216
217 assert(_status == Idle);
218
219 notIdleFraction++;
220 _status = Running;
221
222 // kick things off by initiating the fetch of the next instruction
223 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
224}
225
226
227void
228TimingSimpleCPU::suspendContext(int thread_num)
229{
230 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
231
232 assert(thread_num == 0);
233 assert(thread);
234
235 assert(_status == Running);
236
237 // just change status to Idle... if status != Running,
238 // completeInst() will not initiate fetch of next instruction.
239
240 notIdleFraction--;
241 _status = Idle;
242}
243
244bool
245TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
246{
247 RequestPtr req = pkt->req;
248 if (req->isMmapedIpr()) {
249 Tick delay;
250 delay = TheISA::handleIprRead(thread->getTC(), pkt);
251 new IprEvent(pkt, this, nextCycle(curTick + delay));
252 _status = DcacheWaitResponse;
253 dcache_pkt = NULL;
254 } else if (!dcachePort.sendTiming(pkt)) {
255 _status = DcacheRetry;
256 dcache_pkt = pkt;
257 } else {
258 _status = DcacheWaitResponse;
259 // memory system takes ownership of packet
260 dcache_pkt = NULL;
261 }
262 return dcache_pkt == NULL;
263}
264
109{
110 _status = Idle;
111
112 icachePort.snoopRangeSent = false;
113 dcachePort.snoopRangeSent = false;
114
115 ifetch_pkt = dcache_pkt = NULL;
116 drainEvent = NULL;
117 previousTick = 0;
118 changeState(SimObject::Running);
119}
120
121
122TimingSimpleCPU::~TimingSimpleCPU()
123{
124}
125
126void
127TimingSimpleCPU::serialize(ostream &os)
128{
129 SimObject::State so_state = SimObject::getState();
130 SERIALIZE_ENUM(so_state);
131 BaseSimpleCPU::serialize(os);
132}
133
134void
135TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
136{
137 SimObject::State so_state;
138 UNSERIALIZE_ENUM(so_state);
139 BaseSimpleCPU::unserialize(cp, section);
140}
141
142unsigned int
143TimingSimpleCPU::drain(Event *drain_event)
144{
145 // TimingSimpleCPU is ready to drain if it's not waiting for
146 // an access to complete.
147 if (_status == Idle || _status == Running || _status == SwitchedOut) {
148 changeState(SimObject::Drained);
149 return 0;
150 } else {
151 changeState(SimObject::Draining);
152 drainEvent = drain_event;
153 return 1;
154 }
155}
156
157void
158TimingSimpleCPU::resume()
159{
160 DPRINTF(SimpleCPU, "Resume\n");
161 if (_status != SwitchedOut && _status != Idle) {
162 assert(system->getMemoryMode() == Enums::timing);
163
164 if (fetchEvent.scheduled())
165 deschedule(fetchEvent);
166
167 schedule(fetchEvent, nextCycle());
168 }
169
170 changeState(SimObject::Running);
171}
172
173void
174TimingSimpleCPU::switchOut()
175{
176 assert(_status == Running || _status == Idle);
177 _status = SwitchedOut;
178 numCycles += tickToCycles(curTick - previousTick);
179
180 // If we've been scheduled to resume but are then told to switch out,
181 // we'll need to cancel it.
182 if (fetchEvent.scheduled())
183 deschedule(fetchEvent);
184}
185
186
187void
188TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
189{
190 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
191
192 // if any of this CPU's ThreadContexts are active, mark the CPU as
193 // running and schedule its tick event.
194 for (int i = 0; i < threadContexts.size(); ++i) {
195 ThreadContext *tc = threadContexts[i];
196 if (tc->status() == ThreadContext::Active && _status != Running) {
197 _status = Running;
198 break;
199 }
200 }
201
202 if (_status != Running) {
203 _status = Idle;
204 }
205 assert(threadContexts.size() == 1);
206 previousTick = curTick;
207}
208
209
210void
211TimingSimpleCPU::activateContext(int thread_num, int delay)
212{
213 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
214
215 assert(thread_num == 0);
216 assert(thread);
217
218 assert(_status == Idle);
219
220 notIdleFraction++;
221 _status = Running;
222
223 // kick things off by initiating the fetch of the next instruction
224 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
225}
226
227
228void
229TimingSimpleCPU::suspendContext(int thread_num)
230{
231 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
232
233 assert(thread_num == 0);
234 assert(thread);
235
236 assert(_status == Running);
237
238 // just change status to Idle... if status != Running,
239 // completeInst() will not initiate fetch of next instruction.
240
241 notIdleFraction--;
242 _status = Idle;
243}
244
245bool
246TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
247{
248 RequestPtr req = pkt->req;
249 if (req->isMmapedIpr()) {
250 Tick delay;
251 delay = TheISA::handleIprRead(thread->getTC(), pkt);
252 new IprEvent(pkt, this, nextCycle(curTick + delay));
253 _status = DcacheWaitResponse;
254 dcache_pkt = NULL;
255 } else if (!dcachePort.sendTiming(pkt)) {
256 _status = DcacheRetry;
257 dcache_pkt = pkt;
258 } else {
259 _status = DcacheWaitResponse;
260 // memory system takes ownership of packet
261 dcache_pkt = NULL;
262 }
263 return dcache_pkt == NULL;
264}
265
265Fault
266TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
267 RequestPtr &req, Addr split_addr, uint8_t *data, bool read)
266void
267TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
268 uint8_t *data, uint64_t *res, bool read)
268{
269{
269 Fault fault;
270 RequestPtr req1, req2;
271 assert(!req->isLocked() && !req->isSwap());
272 req->splitOnVaddr(split_addr, req1, req2);
273
274 pkt1 = pkt2 = NULL;
275 if ((fault = buildPacket(pkt1, req1, read)) != NoFault ||
276 (fault = buildPacket(pkt2, req2, read)) != NoFault) {
270 _status = Running;
271 if (fault != NoFault) {
272 delete data;
277 delete req;
273 delete req;
278 delete req1;
279 delete pkt1;
280 req = NULL;
281 pkt1 = NULL;
282 return fault;
274
275 translationFault(fault);
276 return;
283 }
277 }
278 PacketPtr pkt;
279 buildPacket(pkt, req, read);
280 pkt->dataDynamic<uint8_t>(data);
281 if (req->getFlags().isSet(Request::NO_ACCESS)) {
282 assert(!dcache_pkt);
283 pkt->makeResponse();
284 completeDataAccess(pkt);
285 } else if (read) {
286 handleReadPacket(pkt);
287 } else {
288 bool do_access = true; // flag to suppress cache access
284
289
285 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
290 if (req->isLocked()) {
291 do_access = TheISA::handleLockedWrite(thread, req);
292 } else if (req->isCondSwap()) {
293 assert(res);
294 req->setExtraData(*res);
295 }
286
296
287 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
288 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
289 Packet::Broadcast);
290 if (req->getFlags().isSet(Request::NO_ACCESS)) {
297 if (do_access) {
298 dcache_pkt = pkt;
299 handleWritePacket();
300 } else {
301 _status = DcacheWaitResponse;
302 completeDataAccess(pkt);
303 }
304 }
305}
306
307void
308TimingSimpleCPU::sendSplitData(Fault fault1, Fault fault2,
309 RequestPtr req1, RequestPtr req2, RequestPtr req,
310 uint8_t *data, bool read)
311{
312 _status = Running;
313 if (fault1 != NoFault || fault2 != NoFault) {
314 delete data;
291 delete req1;
315 delete req1;
292 delete pkt1;
293 delete req2;
316 delete req2;
294 delete pkt2;
295 pkt1 = pkt;
296 pkt2 = NULL;
297 return NoFault;
317 if (fault1 != NoFault)
318 translationFault(fault1);
319 else if (fault2 != NoFault)
320 translationFault(fault2);
321 return;
298 }
322 }
323 PacketPtr pkt1, pkt2;
324 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
325 if (req->getFlags().isSet(Request::NO_ACCESS)) {
326 assert(!dcache_pkt);
327 pkt1->makeResponse();
328 completeDataAccess(pkt1);
329 } else if (read) {
330 if (handleReadPacket(pkt1)) {
331 SplitFragmentSenderState * send_state =
332 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
333 send_state->clearFromParent();
334 if (handleReadPacket(pkt2)) {
335 send_state = dynamic_cast<SplitFragmentSenderState *>(
336 pkt1->senderState);
337 send_state->clearFromParent();
338 }
339 }
340 } else {
341 dcache_pkt = pkt1;
342 if (handleWritePacket()) {
343 SplitFragmentSenderState * send_state =
344 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
345 send_state->clearFromParent();
346 dcache_pkt = pkt2;
347 if (handleWritePacket()) {
348 send_state = dynamic_cast<SplitFragmentSenderState *>(
349 pkt1->senderState);
350 send_state->clearFromParent();
351 }
352 }
353 }
354}
299
355
300 pkt->dataDynamic<uint8_t>(data);
301 pkt1->dataStatic<uint8_t>(data);
302 pkt2->dataStatic<uint8_t>(data + req1->getSize());
356void
357TimingSimpleCPU::translationFault(Fault fault)
358{
359 numCycles += tickToCycles(curTick - previousTick);
360 previousTick = curTick;
303
361
304 SplitMainSenderState * main_send_state = new SplitMainSenderState;
305 pkt->senderState = main_send_state;
306 main_send_state->fragments[0] = pkt1;
307 main_send_state->fragments[1] = pkt2;
308 main_send_state->outstanding = 2;
309 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
310 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
311 return fault;
362 if (traceData) {
363 // Since there was a fault, we shouldn't trace this instruction.
364 delete traceData;
365 traceData = NULL;
366 }
367
368 postExecute();
369
370 if (getState() == SimObject::Draining) {
371 advancePC(fault);
372 completeDrain();
373 } else {
374 advanceInst(fault);
375 }
312}
313
376}
377
314Fault
315TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr &req, bool read)
378void
379TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
316{
380{
317 Fault fault = thread->dtb->translateAtomic(req, tc, !read);
318 MemCmd cmd;
381 MemCmd cmd;
319 if (fault != NoFault) {
320 delete req;
321 req = NULL;
322 pkt = NULL;
323 return fault;
324 } else if (read) {
382 if (read) {
325 cmd = MemCmd::ReadReq;
326 if (req->isLocked())
327 cmd = MemCmd::LoadLockedReq;
328 } else {
329 cmd = MemCmd::WriteReq;
330 if (req->isLocked()) {
331 cmd = MemCmd::StoreCondReq;
332 } else if (req->isSwap()) {
333 cmd = MemCmd::SwapReq;
334 }
335 }
336 pkt = new Packet(req, cmd, Packet::Broadcast);
383 cmd = MemCmd::ReadReq;
384 if (req->isLocked())
385 cmd = MemCmd::LoadLockedReq;
386 } else {
387 cmd = MemCmd::WriteReq;
388 if (req->isLocked()) {
389 cmd = MemCmd::StoreCondReq;
390 } else if (req->isSwap()) {
391 cmd = MemCmd::SwapReq;
392 }
393 }
394 pkt = new Packet(req, cmd, Packet::Broadcast);
337 return NoFault;
338}
339
395}
396
397void
398TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
399 RequestPtr req1, RequestPtr req2, RequestPtr req,
400 uint8_t *data, bool read)
401{
402 pkt1 = pkt2 = NULL;
403
404 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
405
406 if (req->getFlags().isSet(Request::NO_ACCESS)) {
407 buildPacket(pkt1, req, read);
408 return;
409 }
410
411 buildPacket(pkt1, req1, read);
412 buildPacket(pkt2, req2, read);
413
414 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
415 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
416 Packet::Broadcast);
417
418 pkt->dataDynamic<uint8_t>(data);
419 pkt1->dataStatic<uint8_t>(data);
420 pkt2->dataStatic<uint8_t>(data + req1->getSize());
421
422 SplitMainSenderState * main_send_state = new SplitMainSenderState;
423 pkt->senderState = main_send_state;
424 main_send_state->fragments[0] = pkt1;
425 main_send_state->fragments[1] = pkt2;
426 main_send_state->outstanding = 2;
427 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
428 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
429}
430
340template <class T>
341Fault
342TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
343{
344 Fault fault;
345 const int asid = 0;
346 const int thread_id = 0;
347 const Addr pc = thread->readPC();
348 int block_size = dcachePort.peerBlockSize();
349 int data_size = sizeof(T);
350
431template <class T>
432Fault
433TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
434{
435 Fault fault;
436 const int asid = 0;
437 const int thread_id = 0;
438 const Addr pc = thread->readPC();
439 int block_size = dcachePort.peerBlockSize();
440 int data_size = sizeof(T);
441
351 PacketPtr pkt;
352 RequestPtr req = new Request(asid, addr, data_size,
353 flags, pc, _cpuId, thread_id);
354
355 Addr split_addr = roundDown(addr + data_size - 1, block_size);
356 assert(split_addr <= addr || split_addr - addr < block_size);
357
442 RequestPtr req = new Request(asid, addr, data_size,
443 flags, pc, _cpuId, thread_id);
444
445 Addr split_addr = roundDown(addr + data_size - 1, block_size);
446 assert(split_addr <= addr || split_addr - addr < block_size);
447
448
449 _status = DTBWaitResponse;
358 if (split_addr > addr) {
450 if (split_addr > addr) {
359 PacketPtr pkt1, pkt2;
360 Fault fault = this->buildSplitPacket(pkt1, pkt2, req,
361 split_addr, (uint8_t *)(new T), true);
362 if (fault != NoFault)
363 return fault;
364 if (req->getFlags().isSet(Request::NO_ACCESS)) {
365 dcache_pkt = pkt1;
366 } else if (handleReadPacket(pkt1)) {
367 SplitFragmentSenderState * send_state =
368 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
369 send_state->clearFromParent();
370 if (handleReadPacket(pkt2)) {
371 send_state =
372 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
373 send_state->clearFromParent();
374 }
375 }
451 RequestPtr req1, req2;
452 assert(!req->isLocked() && !req->isSwap());
453 req->splitOnVaddr(split_addr, req1, req2);
454
455 typedef SplitDataTranslation::WholeTranslationState WholeState;
456 WholeState *state = new WholeState(req1, req2, req,
457 (uint8_t *)(new T), true);
458 thread->dtb->translateTiming(req1, tc,
459 new SplitDataTranslation(this, 0, state), false);
460 thread->dtb->translateTiming(req2, tc,
461 new SplitDataTranslation(this, 1, state), false);
376 } else {
462 } else {
377 Fault fault = buildPacket(pkt, req, true);
378 if (fault != NoFault) {
379 return fault;
380 }
381 if (req->getFlags().isSet(Request::NO_ACCESS)) {
382 dcache_pkt = pkt;
383 } else {
384 pkt->dataDynamic<T>(new T);
385 handleReadPacket(pkt);
386 }
463 thread->dtb->translateTiming(req, tc,
464 new DataTranslation(this, (uint8_t *)(new T), NULL, true),
465 false);
387 }
388
389 if (traceData) {
390 traceData->setData(data);
391 traceData->setAddr(addr);
392 }
393
394 // This will need a new way to tell if it has a dcache attached.
395 if (req->isUncacheable())
396 recordEvent("Uncached Read");
397
398 return NoFault;
399}
400
401#ifndef DOXYGEN_SHOULD_SKIP_THIS
402
403template
404Fault
405TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
406
407template
408Fault
409TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
410
411template
412Fault
413TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
414
415template
416Fault
417TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
418
419template
420Fault
421TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
422
423template
424Fault
425TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
426
427#endif //DOXYGEN_SHOULD_SKIP_THIS
428
429template<>
430Fault
431TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
432{
433 return read(addr, *(uint64_t*)&data, flags);
434}
435
436template<>
437Fault
438TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
439{
440 return read(addr, *(uint32_t*)&data, flags);
441}
442
443
444template<>
445Fault
446TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
447{
448 return read(addr, (uint32_t&)data, flags);
449}
450
451bool
452TimingSimpleCPU::handleWritePacket()
453{
454 RequestPtr req = dcache_pkt->req;
455 if (req->isMmapedIpr()) {
456 Tick delay;
457 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
458 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
459 _status = DcacheWaitResponse;
460 dcache_pkt = NULL;
461 } else if (!dcachePort.sendTiming(dcache_pkt)) {
462 _status = DcacheRetry;
463 } else {
464 _status = DcacheWaitResponse;
465 // memory system takes ownership of packet
466 dcache_pkt = NULL;
467 }
468 return dcache_pkt == NULL;
469}
470
471template <class T>
472Fault
473TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
474{
475 const int asid = 0;
476 const int thread_id = 0;
477 const Addr pc = thread->readPC();
478 int block_size = dcachePort.peerBlockSize();
479 int data_size = sizeof(T);
480
481 RequestPtr req = new Request(asid, addr, data_size,
482 flags, pc, _cpuId, thread_id);
483
484 Addr split_addr = roundDown(addr + data_size - 1, block_size);
485 assert(split_addr <= addr || split_addr - addr < block_size);
486
466 }
467
468 if (traceData) {
469 traceData->setData(data);
470 traceData->setAddr(addr);
471 }
472
473 // This will need a new way to tell if it has a dcache attached.
474 if (req->isUncacheable())
475 recordEvent("Uncached Read");
476
477 return NoFault;
478}
479
480#ifndef DOXYGEN_SHOULD_SKIP_THIS
481
482template
483Fault
484TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
485
486template
487Fault
488TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
489
490template
491Fault
492TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
493
494template
495Fault
496TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
497
498template
499Fault
500TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
501
502template
503Fault
504TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
505
506#endif //DOXYGEN_SHOULD_SKIP_THIS
507
508template<>
509Fault
510TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
511{
512 return read(addr, *(uint64_t*)&data, flags);
513}
514
515template<>
516Fault
517TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
518{
519 return read(addr, *(uint32_t*)&data, flags);
520}
521
522
523template<>
524Fault
525TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
526{
527 return read(addr, (uint32_t&)data, flags);
528}
529
530bool
531TimingSimpleCPU::handleWritePacket()
532{
533 RequestPtr req = dcache_pkt->req;
534 if (req->isMmapedIpr()) {
535 Tick delay;
536 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
537 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
538 _status = DcacheWaitResponse;
539 dcache_pkt = NULL;
540 } else if (!dcachePort.sendTiming(dcache_pkt)) {
541 _status = DcacheRetry;
542 } else {
543 _status = DcacheWaitResponse;
544 // memory system takes ownership of packet
545 dcache_pkt = NULL;
546 }
547 return dcache_pkt == NULL;
548}
549
550template <class T>
551Fault
552TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
553{
554 const int asid = 0;
555 const int thread_id = 0;
556 const Addr pc = thread->readPC();
557 int block_size = dcachePort.peerBlockSize();
558 int data_size = sizeof(T);
559
560 RequestPtr req = new Request(asid, addr, data_size,
561 flags, pc, _cpuId, thread_id);
562
563 Addr split_addr = roundDown(addr + data_size - 1, block_size);
564 assert(split_addr <= addr || split_addr - addr < block_size);
565
566 T *dataP = new T;
567 *dataP = TheISA::gtoh(data);
568 _status = DTBWaitResponse;
487 if (split_addr > addr) {
569 if (split_addr > addr) {
488 PacketPtr pkt1, pkt2;
489 T *dataP = new T;
490 *dataP = data;
491 Fault fault = this->buildSplitPacket(pkt1, pkt2, req, split_addr,
492 (uint8_t *)dataP, false);
493 if (fault != NoFault)
494 return fault;
495 dcache_pkt = pkt1;
496 if (!req->getFlags().isSet(Request::NO_ACCESS)) {
497 if (handleWritePacket()) {
498 SplitFragmentSenderState * send_state =
499 dynamic_cast<SplitFragmentSenderState *>(
500 pkt1->senderState);
501 send_state->clearFromParent();
502 dcache_pkt = pkt2;
503 if (handleReadPacket(pkt2)) {
504 send_state =
505 dynamic_cast<SplitFragmentSenderState *>(
506 pkt1->senderState);
507 send_state->clearFromParent();
508 }
509 }
510 }
511 } else {
512 bool do_access = true; // flag to suppress cache access
570 RequestPtr req1, req2;
571 assert(!req->isLocked() && !req->isSwap());
572 req->splitOnVaddr(split_addr, req1, req2);
513
573
514 Fault fault = buildPacket(dcache_pkt, req, false);
515 if (fault != NoFault)
516 return fault;
517
518 if (!req->getFlags().isSet(Request::NO_ACCESS)) {
519 if (req->isLocked()) {
520 do_access = TheISA::handleLockedWrite(thread, req);
521 } else if (req->isCondSwap()) {
522 assert(res);
523 req->setExtraData(*res);
524 }
525
526 dcache_pkt->allocate();
527 if (req->isMmapedIpr())
528 dcache_pkt->set(htog(data));
529 else
530 dcache_pkt->set(data);
531
532 if (do_access)
533 handleWritePacket();
534 }
574 typedef SplitDataTranslation::WholeTranslationState WholeState;
575 WholeState *state = new WholeState(req1, req2, req,
576 (uint8_t *)dataP, false);
577 thread->dtb->translateTiming(req1, tc,
578 new SplitDataTranslation(this, 0, state), true);
579 thread->dtb->translateTiming(req2, tc,
580 new SplitDataTranslation(this, 1, state), true);
581 } else {
582 thread->dtb->translateTiming(req, tc,
583 new DataTranslation(this, (uint8_t *)dataP, res, false),
584 true);
535 }
536
537 if (traceData) {
538 traceData->setAddr(req->getVaddr());
539 traceData->setData(data);
540 }
541
542 // This will need a new way to tell if it's hooked up to a cache or not.
543 if (req->isUncacheable())
544 recordEvent("Uncached Write");
545
546 // If the write needs to have a fault on the access, consider calling
547 // changeStatus() and changing it to "bad addr write" or something.
548 return NoFault;
549}
550
551
552#ifndef DOXYGEN_SHOULD_SKIP_THIS
553template
554Fault
555TimingSimpleCPU::write(Twin32_t data, Addr addr,
556 unsigned flags, uint64_t *res);
557
558template
559Fault
560TimingSimpleCPU::write(Twin64_t data, Addr addr,
561 unsigned flags, uint64_t *res);
562
563template
564Fault
565TimingSimpleCPU::write(uint64_t data, Addr addr,
566 unsigned flags, uint64_t *res);
567
568template
569Fault
570TimingSimpleCPU::write(uint32_t data, Addr addr,
571 unsigned flags, uint64_t *res);
572
573template
574Fault
575TimingSimpleCPU::write(uint16_t data, Addr addr,
576 unsigned flags, uint64_t *res);
577
578template
579Fault
580TimingSimpleCPU::write(uint8_t data, Addr addr,
581 unsigned flags, uint64_t *res);
582
583#endif //DOXYGEN_SHOULD_SKIP_THIS
584
585template<>
586Fault
587TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
588{
589 return write(*(uint64_t*)&data, addr, flags, res);
590}
591
592template<>
593Fault
594TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
595{
596 return write(*(uint32_t*)&data, addr, flags, res);
597}
598
599
600template<>
601Fault
602TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
603{
604 return write((uint32_t)data, addr, flags, res);
605}
606
607
608void
609TimingSimpleCPU::fetch()
610{
611 DPRINTF(SimpleCPU, "Fetch\n");
612
613 if (!curStaticInst || !curStaticInst->isDelayedCommit())
614 checkForInterrupts();
615
616 checkPcEventQueue();
617
618 bool fromRom = isRomMicroPC(thread->readMicroPC());
619
620 if (!fromRom) {
621 Request *ifetch_req = new Request();
622 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
585 }
586
587 if (traceData) {
588 traceData->setAddr(req->getVaddr());
589 traceData->setData(data);
590 }
591
592 // This will need a new way to tell if it's hooked up to a cache or not.
593 if (req->isUncacheable())
594 recordEvent("Uncached Write");
595
596 // If the write needs to have a fault on the access, consider calling
597 // changeStatus() and changing it to "bad addr write" or something.
598 return NoFault;
599}
600
601
// NOTE(review): duplicate block -- this file interleaves two revisions
// of timing.cc and emits these explicit instantiations twice; the
// second copy will not compile alongside the first.
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Explicit instantiations of the templated write() for every access
// width/type the ISAs use.
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS
634
// NOTE(review): second copy of this specialization -- merge-conflict
// residue from interleaving two revisions of this file.
template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    // Reinterpret the double's bits as uint64_t and use the integer path.
    return write(*(uint64_t*)&data, addr, flags, res);
}
641
// NOTE(review): second copy of this specialization -- merge-conflict
// residue from interleaving two revisions of this file.
template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    // Reinterpret the float's bits as uint32_t and use the integer path.
    return write(*(uint32_t*)&data, addr, flags, res);
}
648
649
// NOTE(review): second copy of this specialization -- merge-conflict
// residue from interleaving two revisions of this file.
template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    // Signed stores reuse the unsigned path; the bit pattern is unchanged.
    return write((uint32_t)data, addr, flags, res);
}
656
657
658void
659TimingSimpleCPU::fetch()
660{
661 DPRINTF(SimpleCPU, "Fetch\n");
662
663 if (!curStaticInst || !curStaticInst->isDelayedCommit())
664 checkForInterrupts();
665
666 checkPcEventQueue();
667
668 bool fromRom = isRomMicroPC(thread->readMicroPC());
669
670 if (!fromRom) {
671 Request *ifetch_req = new Request();
672 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
623 Fault fault = setupFetchRequest(ifetch_req);
673 setupFetchRequest(ifetch_req);
674 thread->itb->translateTiming(ifetch_req, tc,
675 &fetchTranslation);
676 } else {
677 _status = IcacheWaitResponse;
678 completeIfetch(NULL);
624
679
625 ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
680 numCycles += tickToCycles(curTick - previousTick);
681 previousTick = curTick;
682 }
683}
684
685
686void
687TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
688{
689 if (fault == NoFault) {
690 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
626 ifetch_pkt->dataStatic(&inst);
627
691 ifetch_pkt->dataStatic(&inst);
692
628 if (fault == NoFault) {
629 if (!icachePort.sendTiming(ifetch_pkt)) {
630 // Need to wait for retry
631 _status = IcacheRetry;
632 } else {
633 // Need to wait for cache to respond
634 _status = IcacheWaitResponse;
635 // ownership of packet transferred to memory system
636 ifetch_pkt = NULL;
637 }
693 if (!icachePort.sendTiming(ifetch_pkt)) {
694 // Need to wait for retry
695 _status = IcacheRetry;
638 } else {
696 } else {
639 delete ifetch_req;
640 delete ifetch_pkt;
641 // fetch fault: advance directly to next instruction (fault handler)
642 advanceInst(fault);
697 // Need to wait for cache to respond
698 _status = IcacheWaitResponse;
699 // ownership of packet transferred to memory system
700 ifetch_pkt = NULL;
643 }
644 } else {
701 }
702 } else {
645 _status = IcacheWaitResponse;
646 completeIfetch(NULL);
703 delete req;
704 // fetch fault: advance directly to next instruction (fault handler)
705 advanceInst(fault);
647 }
648
649 numCycles += tickToCycles(curTick - previousTick);
650 previousTick = curTick;
651}
652
653
654void
655TimingSimpleCPU::advanceInst(Fault fault)
656{
657 if (fault != NoFault || !stayAtPC)
658 advancePC(fault);
659
660 if (_status == Running) {
661 // kick off fetch of next instruction... callback from icache
662 // response will cause that instruction to be executed,
663 // keeping the CPU running.
664 fetch();
665 }
666}
667
668
669void
670TimingSimpleCPU::completeIfetch(PacketPtr pkt)
671{
672 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
673
674 // received a response from the icache: execute the received
675 // instruction
676
677 assert(!pkt || !pkt->isError());
678 assert(_status == IcacheWaitResponse);
679
680 _status = Running;
681
682 numCycles += tickToCycles(curTick - previousTick);
683 previousTick = curTick;
684
685 if (getState() == SimObject::Draining) {
686 if (pkt) {
687 delete pkt->req;
688 delete pkt;
689 }
690
691 completeDrain();
692 return;
693 }
694
695 preExecute();
696 if (curStaticInst &&
697 curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
698 // load or store: just send to dcache
699 Fault fault = curStaticInst->initiateAcc(this, traceData);
700 if (_status != Running) {
701 // instruction will complete in dcache response callback
706 }
707
708 numCycles += tickToCycles(curTick - previousTick);
709 previousTick = curTick;
710}
711
712
// NOTE(review): second copy of advanceInst() -- this file interleaves
// two revisions and defines the function twice.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    // Advance the PC (unless the instruction asked to stay put and no
    // fault occurred), then keep the fetch loop running.
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}
726
727
728void
729TimingSimpleCPU::completeIfetch(PacketPtr pkt)
730{
731 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
732
733 // received a response from the icache: execute the received
734 // instruction
735
736 assert(!pkt || !pkt->isError());
737 assert(_status == IcacheWaitResponse);
738
739 _status = Running;
740
741 numCycles += tickToCycles(curTick - previousTick);
742 previousTick = curTick;
743
744 if (getState() == SimObject::Draining) {
745 if (pkt) {
746 delete pkt->req;
747 delete pkt;
748 }
749
750 completeDrain();
751 return;
752 }
753
754 preExecute();
755 if (curStaticInst &&
756 curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
757 // load or store: just send to dcache
758 Fault fault = curStaticInst->initiateAcc(this, traceData);
759 if (_status != Running) {
760 // instruction will complete in dcache response callback
702 assert(_status == DcacheWaitResponse || _status == DcacheRetry);
761 assert(_status == DcacheWaitResponse ||
762 _status == DcacheRetry || DTBWaitResponse);
703 assert(fault == NoFault);
704 } else {
763 assert(fault == NoFault);
764 } else {
705 if (fault == NoFault) {
706 // Note that ARM can have NULL packets if the instruction gets
707 // squashed due to predication
708 // early fail on store conditional: complete now
709 assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);
710
711 fault = curStaticInst->completeAcc(dcache_pkt, this,
712 traceData);
713 if (dcache_pkt != NULL)
714 {
715 delete dcache_pkt->req;
716 delete dcache_pkt;
717 dcache_pkt = NULL;
718 }
719
720 // keep an instruction count
721 if (fault == NoFault)
722 countInst();
723 } else if (traceData) {
765 if (fault != NoFault && traceData) {
724 // If there was a fault, we shouldn't trace this instruction.
725 delete traceData;
726 traceData = NULL;
727 }
728
729 postExecute();
730 // @todo remove me after debugging with legion done
731 if (curStaticInst && (!curStaticInst->isMicroop() ||
732 curStaticInst->isFirstMicroop()))
733 instCnt++;
734 advanceInst(fault);
735 }
736 } else if (curStaticInst) {
737 // non-memory instruction: execute completely now
738 Fault fault = curStaticInst->execute(this, traceData);
739
740 // keep an instruction count
741 if (fault == NoFault)
742 countInst();
743 else if (traceData) {
744 // If there was a fault, we shouldn't trace this instruction.
745 delete traceData;
746 traceData = NULL;
747 }
748
749 postExecute();
750 // @todo remove me after debugging with legion done
751 if (curStaticInst && (!curStaticInst->isMicroop() ||
752 curStaticInst->isFirstMicroop()))
753 instCnt++;
754 advanceInst(fault);
755 } else {
756 advanceInst(NoFault);
757 }
758
759 if (pkt) {
760 delete pkt->req;
761 delete pkt;
762 }
763}
764
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: hand the packet to the CPU now that the
    // next CPU clock edge has arrived.
    cpu->completeIfetch(pkt);
}
770
// Icache response/snoop handler.  Always returns true: this port never
// refuses an incoming packet.
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        // The fetch was nacked downstream: reset the packet and resend
        // immediately; if that fails, stash it and wait for recvRetry().
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
796
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        // Send succeeded: ownership passes to the memory system and we
        // wait for the icache response.
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}
810
811void
812TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
813{
814 // received a response from the dcache: complete the load or store
815 // instruction
816 assert(!pkt->isError());
817
818 numCycles += tickToCycles(curTick - previousTick);
819 previousTick = curTick;
820
821 if (pkt->senderState) {
822 SplitFragmentSenderState * send_state =
823 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
824 assert(send_state);
825 delete pkt->req;
826 delete pkt;
827 PacketPtr big_pkt = send_state->bigPkt;
828 delete send_state;
829
830 SplitMainSenderState * main_send_state =
831 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
832 assert(main_send_state);
833 // Record the fact that this packet is no longer outstanding.
834 assert(main_send_state->outstanding != 0);
835 main_send_state->outstanding--;
836
837 if (main_send_state->outstanding) {
838 return;
839 } else {
840 delete main_send_state;
841 big_pkt->senderState = NULL;
842 pkt = big_pkt;
843 }
844 }
845
766 // If there was a fault, we shouldn't trace this instruction.
767 delete traceData;
768 traceData = NULL;
769 }
770
771 postExecute();
772 // @todo remove me after debugging with legion done
773 if (curStaticInst && (!curStaticInst->isMicroop() ||
774 curStaticInst->isFirstMicroop()))
775 instCnt++;
776 advanceInst(fault);
777 }
778 } else if (curStaticInst) {
779 // non-memory instruction: execute completely now
780 Fault fault = curStaticInst->execute(this, traceData);
781
782 // keep an instruction count
783 if (fault == NoFault)
784 countInst();
785 else if (traceData) {
786 // If there was a fault, we shouldn't trace this instruction.
787 delete traceData;
788 traceData = NULL;
789 }
790
791 postExecute();
792 // @todo remove me after debugging with legion done
793 if (curStaticInst && (!curStaticInst->isMicroop() ||
794 curStaticInst->isFirstMicroop()))
795 instCnt++;
796 advanceInst(fault);
797 } else {
798 advanceInst(NoFault);
799 }
800
801 if (pkt) {
802 delete pkt->req;
803 delete pkt;
804 }
805}
806
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response delivered at the next CPU clock edge.
    cpu->completeIfetch(pkt);
}
812
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        // Nacked downstream: reset and resend, or wait for recvRetry().
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
838
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        // Ownership passes to the memory system; wait for the response.
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}
852
853void
854TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
855{
856 // received a response from the dcache: complete the load or store
857 // instruction
858 assert(!pkt->isError());
859
860 numCycles += tickToCycles(curTick - previousTick);
861 previousTick = curTick;
862
863 if (pkt->senderState) {
864 SplitFragmentSenderState * send_state =
865 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
866 assert(send_state);
867 delete pkt->req;
868 delete pkt;
869 PacketPtr big_pkt = send_state->bigPkt;
870 delete send_state;
871
872 SplitMainSenderState * main_send_state =
873 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
874 assert(main_send_state);
875 // Record the fact that this packet is no longer outstanding.
876 assert(main_send_state->outstanding != 0);
877 main_send_state->outstanding--;
878
879 if (main_send_state->outstanding) {
880 return;
881 } else {
882 delete main_send_state;
883 big_pkt->senderState = NULL;
884 pkt = big_pkt;
885 }
886 }
887
846 assert(_status == DcacheWaitResponse);
888 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse);
847 _status = Running;
848
849 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
850
851 // keep an instruction count
852 if (fault == NoFault)
853 countInst();
854 else if (traceData) {
855 // If there was a fault, we shouldn't trace this instruction.
856 delete traceData;
857 traceData = NULL;
858 }
859
860 // the locked flag may be cleared on the response packet, so check
861 // pkt->req and not pkt to see if it was a load-locked
862 if (pkt->isRead() && pkt->req->isLocked()) {
863 TheISA::handleLockedRead(thread, pkt->req);
864 }
865
866 delete pkt->req;
867 delete pkt;
868
869 postExecute();
870
871 if (getState() == SimObject::Draining) {
872 advancePC(fault);
873 completeDrain();
874
875 return;
876 }
877
878 advanceInst(fault);
879}
880
881
void
TimingSimpleCPU::completeDrain()
{
    // All outstanding accesses have finished: record the Drained state
    // and notify whoever requested the drain.
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}
889
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    // Hook peer connection so the thread context's memory ports can be
    // (re)wired in full-system mode.
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}
901
// Dcache response/snoop handler.  Always returns true: this port never
// refuses an incoming packet.
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        // Nacked downstream: reset the packet and resend; if that
        // fails, stash it and wait for recvRetry().
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
928
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response delivered at the next CPU clock edge.
    cpu->completeDataAccess(pkt);
}
934
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // The sibling fragment has not been issued yet: try it
                // now via the read/write helper that matches the parent.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
978
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-schedules at tick t to model the latency of a memory-mapped
    // IPR access before completeDataAccess() runs.
    cpu->schedule(this, t);
}
985
void
TimingSimpleCPU::IprEvent::process()
{
    // IPR access latency has elapsed: finish it like a dcache response.
    cpu->completeDataAccess(pkt);
}
991
// Human-readable event name for tracing/debugging.
const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}
997
998
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: delegate address printing to the dcache port.
    dcachePort.printAddr(a);
}
1004
1005
1006////////////////////////////////////////////////////////////////////////
1007//
1008// TimingSimpleCPU Simulation Object
1009//
// Factory called from the Python configuration layer; the timing simple
// CPU supports exactly one thread (and one workload in SE mode).
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
889 _status = Running;
890
891 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
892
893 // keep an instruction count
894 if (fault == NoFault)
895 countInst();
896 else if (traceData) {
897 // If there was a fault, we shouldn't trace this instruction.
898 delete traceData;
899 traceData = NULL;
900 }
901
902 // the locked flag may be cleared on the response packet, so check
903 // pkt->req and not pkt to see if it was a load-locked
904 if (pkt->isRead() && pkt->req->isLocked()) {
905 TheISA::handleLockedRead(thread, pkt->req);
906 }
907
908 delete pkt->req;
909 delete pkt;
910
911 postExecute();
912
913 if (getState() == SimObject::Draining) {
914 advancePC(fault);
915 completeDrain();
916
917 return;
918 }
919
920 advanceInst(fault);
921}
922
923
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::completeDrain()
{
    // All outstanding accesses finished: signal the drain requester.
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}
931
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}
943
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        // Nacked downstream: reset and resend, or wait for recvRetry().
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
970
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response delivered at the next CPU clock edge.
    cpu->completeDataAccess(pkt);
}
976
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
1020
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-schedules at tick t to model the IPR access latency.
    cpu->schedule(this, t);
}
1027
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::IprEvent::process()
{
    // IPR access latency elapsed: finish it like a dcache response.
    cpu->completeDataAccess(pkt);
}
1033
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}
1039
1040
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this member twice.
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Debug helper: delegate address printing to the dcache port.
    dcachePort.printAddr(a);
}
1046
1047
1048////////////////////////////////////////////////////////////////////////
1049//
1050// TimingSimpleCPU Simulation Object
1051//
// NOTE(review): second copy -- merge-conflict residue; the file defines
// this factory twice.
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // The timing simple CPU supports exactly one thread.
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}