timing.cc (7745:434b5dfb87d9) timing.cc (7823:dac01f14f20f)
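Comparison of timing.cc between revisions 7745:434b5dfb87d9 and 7823:dac01f14f20f. Where the two revisions differ, the 7745 form of a line appears first, immediately followed by the 7823 form; the changes replace uses of the curTick global with calls to the curTick() function. Unnumbered // comments below are editorial annotations, not part of either revision.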
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmaped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/exetrace.hh"
49#include "cpu/simple/timing.hh"
50#include "mem/packet.hh"
51#include "mem/packet_access.hh"
52#include "params/TimingSimpleCPU.hh"
53#include "sim/faults.hh"
54#include "sim/system.hh"
55
56using namespace std;
57using namespace TheISA;
58
59Port *
60TimingSimpleCPU::getPort(const std::string &if_name, int idx)
61{
62 if (if_name == "dcache_port")
63 return &dcachePort;
64 else if (if_name == "icache_port")
65 return &icachePort;
66 else
67 panic("No Such Port\n");
68}
69
70void
71TimingSimpleCPU::init()
72{
73 BaseCPU::init();
74#if FULL_SYSTEM
75 for (int i = 0; i < threadContexts.size(); ++i) {
76 ThreadContext *tc = threadContexts[i];
77
78 // initialize CPU, including PC
79 TheISA::initCPU(tc, _cpuId);
80 }
81#endif
82}
83
84Tick
85TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
86{
87 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
88 return curTick;
88 return curTick();
89}
90
91void
92TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
93{
 94    //No internal storage to update, just return
95 return;
96}
97
98void
99TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
100{
101 if (status == RangeChange) {
102 if (!snoopRangeSent) {
103 snoopRangeSent = true;
104 sendStatusChange(Port::RangeChange);
105 }
106 return;
107 }
108
109 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
110}
111
112
113void
114TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
115{
116 pkt = _pkt;
117 cpu->schedule(this, t);
118}
119
120TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
121 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
122 dcachePort(this, p->clock), fetchEvent(this)
123{
124 _status = Idle;
125
126 icachePort.snoopRangeSent = false;
127 dcachePort.snoopRangeSent = false;
128
129 ifetch_pkt = dcache_pkt = NULL;
130 drainEvent = NULL;
131 previousTick = 0;
132 changeState(SimObject::Running);
133}
134
135
136TimingSimpleCPU::~TimingSimpleCPU()
137{
138}
139
140void
141TimingSimpleCPU::serialize(ostream &os)
142{
143 SimObject::State so_state = SimObject::getState();
144 SERIALIZE_ENUM(so_state);
145 BaseSimpleCPU::serialize(os);
146}
147
148void
149TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
150{
151 SimObject::State so_state;
152 UNSERIALIZE_ENUM(so_state);
153 BaseSimpleCPU::unserialize(cp, section);
154}
155
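// Drain support: drain() reports how many outstanding activities still need
// to finish before the CPU is drained. It returns 0 (and goes straight to
// Drained) when no access is in flight, otherwise 1; in that case the saved
// drain_event is processed later from completeDrain().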
156unsigned int
157TimingSimpleCPU::drain(Event *drain_event)
158{
159 // TimingSimpleCPU is ready to drain if it's not waiting for
160 // an access to complete.
161 if (_status == Idle || _status == Running || _status == SwitchedOut) {
162 changeState(SimObject::Drained);
163 return 0;
164 } else {
165 changeState(SimObject::Draining);
166 drainEvent = drain_event;
167 return 1;
168 }
169}
170
171void
172TimingSimpleCPU::resume()
173{
174 DPRINTF(SimpleCPU, "Resume\n");
175 if (_status != SwitchedOut && _status != Idle) {
176 assert(system->getMemoryMode() == Enums::timing);
177
178 if (fetchEvent.scheduled())
179 deschedule(fetchEvent);
180
181 schedule(fetchEvent, nextCycle());
182 }
183
184 changeState(SimObject::Running);
185}
186
187void
188TimingSimpleCPU::switchOut()
189{
190 assert(_status == Running || _status == Idle);
191 _status = SwitchedOut;
192 numCycles += tickToCycles(curTick - previousTick);
192 numCycles += tickToCycles(curTick() - previousTick);
193
194 // If we've been scheduled to resume but are then told to switch out,
195 // we'll need to cancel it.
196 if (fetchEvent.scheduled())
197 deschedule(fetchEvent);
198}
199
200
201void
202TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
203{
204 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
205
206 // if any of this CPU's ThreadContexts are active, mark the CPU as
207 // running and schedule its tick event.
208 for (int i = 0; i < threadContexts.size(); ++i) {
209 ThreadContext *tc = threadContexts[i];
210 if (tc->status() == ThreadContext::Active && _status != Running) {
211 _status = Running;
212 break;
213 }
214 }
215
216 if (_status != Running) {
217 _status = Idle;
218 }
219 assert(threadContexts.size() == 1);
220 previousTick = curTick;
220 previousTick = curTick();
221}
222
223
224void
225TimingSimpleCPU::activateContext(int thread_num, int delay)
226{
227 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
228
229 assert(thread_num == 0);
230 assert(thread);
231
232 assert(_status == Idle);
233
234 notIdleFraction++;
235 _status = Running;
236
237 // kick things off by initiating the fetch of the next instruction
238 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
238 schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
239}
240
241
242void
243TimingSimpleCPU::suspendContext(int thread_num)
244{
245 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
246
247 assert(thread_num == 0);
248 assert(thread);
249
250 if (_status == Idle)
251 return;
252
253 assert(_status == Running);
254
255 // just change status to Idle... if status != Running,
256 // completeInst() will not initiate fetch of next instruction.
257
258 notIdleFraction--;
259 _status = Idle;
260}
261
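// Issue a timing read to the d-cache. Memory-mapped IPR reads are handled
// locally and completed via a scheduled IprEvent; otherwise the packet is
// sent on dcachePort, falling back to DcacheRetry if the port is busy.
// Returns true if the CPU no longer holds the packet, false if it must retry.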
262bool
263TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
264{
265 RequestPtr req = pkt->req;
266 if (req->isMmapedIpr()) {
267 Tick delay;
268 delay = TheISA::handleIprRead(thread->getTC(), pkt);
269 new IprEvent(pkt, this, nextCycle(curTick + delay));
269 new IprEvent(pkt, this, nextCycle(curTick() + delay));
270 _status = DcacheWaitResponse;
271 dcache_pkt = NULL;
272 } else if (!dcachePort.sendTiming(pkt)) {
273 _status = DcacheRetry;
274 dcache_pkt = pkt;
275 } else {
276 _status = DcacheWaitResponse;
277 // memory system takes ownership of packet
278 dcache_pkt = NULL;
279 }
280 return dcache_pkt == NULL;
281}
282
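// Invoked from finishTranslation() once address translation has finished for
// a non-split access: build the packet, attach the data buffer, and either
// short-circuit NO_ACCESS requests, issue a read, or perform the
// store-conditional / swap checks before issuing a write.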
283void
284TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
285 bool read)
286{
287 PacketPtr pkt;
288 buildPacket(pkt, req, read);
289 pkt->dataDynamicArray<uint8_t>(data);
290 if (req->getFlags().isSet(Request::NO_ACCESS)) {
291 assert(!dcache_pkt);
292 pkt->makeResponse();
293 completeDataAccess(pkt);
294 } else if (read) {
295 handleReadPacket(pkt);
296 } else {
297 bool do_access = true; // flag to suppress cache access
298
299 if (req->isLLSC()) {
300 do_access = TheISA::handleLockedWrite(thread, req);
301 } else if (req->isCondSwap()) {
302 assert(res);
303 req->setExtraData(*res);
304 }
305
306 if (do_access) {
307 dcache_pkt = pkt;
308 handleWritePacket();
309 } else {
310 _status = DcacheWaitResponse;
311 completeDataAccess(pkt);
312 }
313 }
314}
315
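// As sendData above, but for an access that crosses a cache-line boundary:
// the two fragment packets are sent independently and are tied back to the
// original request through the sender-state objects set up in
// buildSplitPacket below.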
316void
317TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
318 RequestPtr req, uint8_t *data, bool read)
319{
320 PacketPtr pkt1, pkt2;
321 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
322 if (req->getFlags().isSet(Request::NO_ACCESS)) {
323 assert(!dcache_pkt);
324 pkt1->makeResponse();
325 completeDataAccess(pkt1);
326 } else if (read) {
327 if (handleReadPacket(pkt1)) {
328 SplitFragmentSenderState * send_state =
329 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
330 send_state->clearFromParent();
331 if (handleReadPacket(pkt2)) {
332 send_state = dynamic_cast<SplitFragmentSenderState *>(
333 pkt1->senderState);
334 send_state->clearFromParent();
335 }
336 }
337 } else {
338 dcache_pkt = pkt1;
339 if (handleWritePacket()) {
340 SplitFragmentSenderState * send_state =
341 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
342 send_state->clearFromParent();
343 dcache_pkt = pkt2;
344 if (handleWritePacket()) {
345 send_state = dynamic_cast<SplitFragmentSenderState *>(
346 pkt1->senderState);
347 send_state->clearFromParent();
348 }
349 }
350 }
351}
352
353void
354TimingSimpleCPU::translationFault(Fault fault)
355{
356 // fault may be NoFault in cases where a fault is suppressed,
357 // for instance prefetches.
358 numCycles += tickToCycles(curTick - previousTick);
359 previousTick = curTick;
358 numCycles += tickToCycles(curTick() - previousTick);
359 previousTick = curTick();
360
361 if (traceData) {
362 // Since there was a fault, we shouldn't trace this instruction.
363 delete traceData;
364 traceData = NULL;
365 }
366
367 postExecute();
368
369 if (getState() == SimObject::Draining) {
370 advancePC(fault);
371 completeDrain();
372 } else {
373 advanceInst(fault);
374 }
375}
376
377void
378TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
379{
380 MemCmd cmd;
381 if (read) {
382 cmd = MemCmd::ReadReq;
383 if (req->isLLSC())
384 cmd = MemCmd::LoadLockedReq;
385 } else {
386 cmd = MemCmd::WriteReq;
387 if (req->isLLSC()) {
388 cmd = MemCmd::StoreCondReq;
389 } else if (req->isSwap()) {
390 cmd = MemCmd::SwapReq;
391 }
392 }
393 pkt = new Packet(req, cmd, Packet::Broadcast);
394}
395
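// Build the two fragment packets plus a "main" packet representing the whole
// access. The main packet owns the data buffer (dataDynamicArray) while the
// fragments alias into it (dataStatic); SplitMainSenderState tracks the two
// outstanding fragments and SplitFragmentSenderState links each fragment back
// to its parent.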
396void
397TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
398 RequestPtr req1, RequestPtr req2, RequestPtr req,
399 uint8_t *data, bool read)
400{
401 pkt1 = pkt2 = NULL;
402
403 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
404
405 if (req->getFlags().isSet(Request::NO_ACCESS)) {
406 buildPacket(pkt1, req, read);
407 return;
408 }
409
410 buildPacket(pkt1, req1, read);
411 buildPacket(pkt2, req2, read);
412
413 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
414 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
415 Packet::Broadcast);
416
417 pkt->dataDynamicArray<uint8_t>(data);
418 pkt1->dataStatic<uint8_t>(data);
419 pkt2->dataStatic<uint8_t>(data + req1->getSize());
420
421 SplitMainSenderState * main_send_state = new SplitMainSenderState;
422 pkt->senderState = main_send_state;
423 main_send_state->fragments[0] = pkt1;
424 main_send_state->fragments[1] = pkt2;
425 main_send_state->outstanding = 2;
426 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
427 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
428}
429
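// Start a timing read: allocate the request, split it if it crosses a cache
// line, and hand it to the DTB for timing translation. The access itself is
// issued later from finishTranslation(), so the NoFault returned here only
// means the translation was successfully started.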
430Fault
431TimingSimpleCPU::readBytes(Addr addr, uint8_t *data,
432 unsigned size, unsigned flags)
433{
434 Fault fault;
435 const int asid = 0;
436 const ThreadID tid = 0;
437 const Addr pc = thread->instAddr();
438 unsigned block_size = dcachePort.peerBlockSize();
439 BaseTLB::Mode mode = BaseTLB::Read;
440
441 if (traceData) {
442 traceData->setAddr(addr);
443 }
444
445 RequestPtr req = new Request(asid, addr, size,
446 flags, pc, _cpuId, tid);
447
448 Addr split_addr = roundDown(addr + size - 1, block_size);
449 assert(split_addr <= addr || split_addr - addr < block_size);
450
451 _status = DTBWaitResponse;
452 if (split_addr > addr) {
453 RequestPtr req1, req2;
454 assert(!req->isLLSC() && !req->isSwap());
455 req->splitOnVaddr(split_addr, req1, req2);
456
457 WholeTranslationState *state =
458 new WholeTranslationState(req, req1, req2, new uint8_t[size],
459 NULL, mode);
460 DataTranslation<TimingSimpleCPU> *trans1 =
461 new DataTranslation<TimingSimpleCPU>(this, state, 0);
462 DataTranslation<TimingSimpleCPU> *trans2 =
463 new DataTranslation<TimingSimpleCPU>(this, state, 1);
464
465 thread->dtb->translateTiming(req1, tc, trans1, mode);
466 thread->dtb->translateTiming(req2, tc, trans2, mode);
467 } else {
468 WholeTranslationState *state =
469 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
470 DataTranslation<TimingSimpleCPU> *translation
471 = new DataTranslation<TimingSimpleCPU>(this, state);
472 thread->dtb->translateTiming(req, tc, translation, mode);
473 }
474
475 return NoFault;
476}
477
478template <class T>
479Fault
480TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
481{
482 return readBytes(addr, (uint8_t *)&data, sizeof(T), flags);
483}
484
485#ifndef DOXYGEN_SHOULD_SKIP_THIS
486
487template
488Fault
489TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
490
491template
492Fault
493TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
494
495template
496Fault
497TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
498
499template
500Fault
501TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
502
503template
504Fault
505TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
506
507template
508Fault
509TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
510
511#endif //DOXYGEN_SHOULD_SKIP_THIS
512
513template<>
514Fault
515TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
516{
517 return read(addr, *(uint64_t*)&data, flags);
518}
519
520template<>
521Fault
522TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
523{
524 return read(addr, *(uint32_t*)&data, flags);
525}
526
527template<>
528Fault
529TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
530{
531 return read(addr, (uint32_t&)data, flags);
532}
533
534bool
535TimingSimpleCPU::handleWritePacket()
536{
537 RequestPtr req = dcache_pkt->req;
538 if (req->isMmapedIpr()) {
539 Tick delay;
540 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
541 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
541 new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
542 _status = DcacheWaitResponse;
543 dcache_pkt = NULL;
544 } else if (!dcachePort.sendTiming(dcache_pkt)) {
545 _status = DcacheRetry;
546 } else {
547 _status = DcacheWaitResponse;
548 // memory system takes ownership of packet
549 dcache_pkt = NULL;
550 }
551 return dcache_pkt == NULL;
552}
553
554Fault
555TimingSimpleCPU::writeTheseBytes(uint8_t *data, unsigned size,
556 Addr addr, unsigned flags, uint64_t *res)
557{
558 const int asid = 0;
559 const ThreadID tid = 0;
560 const Addr pc = thread->instAddr();
561 unsigned block_size = dcachePort.peerBlockSize();
562 BaseTLB::Mode mode = BaseTLB::Write;
563
564 if (traceData) {
565 traceData->setAddr(addr);
566 }
567
568 RequestPtr req = new Request(asid, addr, size,
569 flags, pc, _cpuId, tid);
570
571 Addr split_addr = roundDown(addr + size - 1, block_size);
572 assert(split_addr <= addr || split_addr - addr < block_size);
573
574 _status = DTBWaitResponse;
575 if (split_addr > addr) {
576 RequestPtr req1, req2;
577 assert(!req->isLLSC() && !req->isSwap());
578 req->splitOnVaddr(split_addr, req1, req2);
579
580 WholeTranslationState *state =
581 new WholeTranslationState(req, req1, req2, data, res, mode);
582 DataTranslation<TimingSimpleCPU> *trans1 =
583 new DataTranslation<TimingSimpleCPU>(this, state, 0);
584 DataTranslation<TimingSimpleCPU> *trans2 =
585 new DataTranslation<TimingSimpleCPU>(this, state, 1);
586
587 thread->dtb->translateTiming(req1, tc, trans1, mode);
588 thread->dtb->translateTiming(req2, tc, trans2, mode);
589 } else {
590 WholeTranslationState *state =
591 new WholeTranslationState(req, data, res, mode);
592 DataTranslation<TimingSimpleCPU> *translation =
593 new DataTranslation<TimingSimpleCPU>(this, state);
594 thread->dtb->translateTiming(req, tc, translation, mode);
595 }
596
597 // Translation faults will be returned via finishTranslation()
598 return NoFault;
599}
600
601Fault
602TimingSimpleCPU::writeBytes(uint8_t *data, unsigned size,
603 Addr addr, unsigned flags, uint64_t *res)
604{
605 uint8_t *newData = new uint8_t[size];
606 memcpy(newData, data, size);
607 return writeTheseBytes(newData, size, addr, flags, res);
608}
609
610template <class T>
611Fault
612TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
613{
614 if (traceData) {
615 traceData->setData(data);
616 }
617 T *dataP = (T*) new uint8_t[sizeof(T)];
618 *dataP = TheISA::htog(data);
619
620 return writeTheseBytes((uint8_t *)dataP, sizeof(T), addr, flags, res);
621}
622
623
624#ifndef DOXYGEN_SHOULD_SKIP_THIS
625template
626Fault
627TimingSimpleCPU::write(Twin32_t data, Addr addr,
628 unsigned flags, uint64_t *res);
629
630template
631Fault
632TimingSimpleCPU::write(Twin64_t data, Addr addr,
633 unsigned flags, uint64_t *res);
634
635template
636Fault
637TimingSimpleCPU::write(uint64_t data, Addr addr,
638 unsigned flags, uint64_t *res);
639
640template
641Fault
642TimingSimpleCPU::write(uint32_t data, Addr addr,
643 unsigned flags, uint64_t *res);
644
645template
646Fault
647TimingSimpleCPU::write(uint16_t data, Addr addr,
648 unsigned flags, uint64_t *res);
649
650template
651Fault
652TimingSimpleCPU::write(uint8_t data, Addr addr,
653 unsigned flags, uint64_t *res);
654
655#endif //DOXYGEN_SHOULD_SKIP_THIS
656
657template<>
658Fault
659TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
660{
661 return write(*(uint64_t*)&data, addr, flags, res);
662}
663
664template<>
665Fault
666TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
667{
668 return write(*(uint32_t*)&data, addr, flags, res);
669}
670
671
672template<>
673Fault
674TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
675{
676 return write((uint32_t)data, addr, flags, res);
677}
678
679
680void
681TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
682{
683 _status = Running;
684
685 if (state->getFault() != NoFault) {
686 if (state->isPrefetch()) {
687 state->setNoFault();
688 }
689 delete [] state->data;
690 state->deleteReqs();
691 translationFault(state->getFault());
692 } else {
693 if (!state->isSplit) {
694 sendData(state->mainReq, state->data, state->res,
695 state->mode == BaseTLB::Read);
696 } else {
697 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
698 state->data, state->mode == BaseTLB::Read);
699 }
700 }
701
702 delete state;
703}
704
705
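// Begin fetching the next instruction: check for interrupts and PC events,
// then either start a timing ITB translation for the fetch (the translated
// request comes back through sendFetch()) or, for microcode that needs no
// memory access, fake an immediate icache completion.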
706void
707TimingSimpleCPU::fetch()
708{
709 DPRINTF(SimpleCPU, "Fetch\n");
710
711 if (!curStaticInst || !curStaticInst->isDelayedCommit())
712 checkForInterrupts();
713
714 checkPcEventQueue();
715
716 TheISA::PCState pcState = thread->pcState();
717 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
718
719 if (needToFetch) {
720 Request *ifetch_req = new Request();
721 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
722 setupFetchRequest(ifetch_req);
723 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
724 BaseTLB::Execute);
725 } else {
726 _status = IcacheWaitResponse;
727 completeIfetch(NULL);
728
729 numCycles += tickToCycles(curTick - previousTick);
730 previousTick = curTick;
729 numCycles += tickToCycles(curTick() - previousTick);
730 previousTick = curTick();
731 }
732}
733
734
735void
736TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
737{
738 if (fault == NoFault) {
739 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
740 ifetch_pkt->dataStatic(&inst);
741
742 if (!icachePort.sendTiming(ifetch_pkt)) {
743 // Need to wait for retry
744 _status = IcacheRetry;
745 } else {
746 // Need to wait for cache to respond
747 _status = IcacheWaitResponse;
748 // ownership of packet transferred to memory system
749 ifetch_pkt = NULL;
750 }
751 } else {
752 delete req;
753 // fetch fault: advance directly to next instruction (fault handler)
754 advanceInst(fault);
755 }
756
757 numCycles += tickToCycles(curTick - previousTick);
758 previousTick = curTick;
757 numCycles += tickToCycles(curTick() - previousTick);
758 previousTick = curTick();
759}
760
761
762void
763TimingSimpleCPU::advanceInst(Fault fault)
764{
765 if (fault != NoFault || !stayAtPC)
766 advancePC(fault);
767
768 if (_status == Running) {
769 // kick off fetch of next instruction... callback from icache
770 // response will cause that instruction to be executed,
771 // keeping the CPU running.
772 fetch();
773 }
774}
775
776
777void
778TimingSimpleCPU::completeIfetch(PacketPtr pkt)
779{
780 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
781
782 // received a response from the icache: execute the received
783 // instruction
784
785 assert(!pkt || !pkt->isError());
786 assert(_status == IcacheWaitResponse);
787
788 _status = Running;
789
790 numCycles += tickToCycles(curTick - previousTick);
791 previousTick = curTick;
790 numCycles += tickToCycles(curTick() - previousTick);
791 previousTick = curTick();
792
793 if (getState() == SimObject::Draining) {
794 if (pkt) {
795 delete pkt->req;
796 delete pkt;
797 }
798
799 completeDrain();
800 return;
801 }
802
803 preExecute();
804 if (curStaticInst && curStaticInst->isMemRef()) {
805 // load or store: just send to dcache
806 Fault fault = curStaticInst->initiateAcc(this, traceData);
807 if (_status != Running) {
808 // instruction will complete in dcache response callback
809 assert(_status == DcacheWaitResponse ||
810                _status == DcacheRetry || _status == DTBWaitResponse);
811 assert(fault == NoFault);
812 } else {
813 if (fault != NoFault && traceData) {
814 // If there was a fault, we shouldn't trace this instruction.
815 delete traceData;
816 traceData = NULL;
817 }
818
819 postExecute();
820 // @todo remove me after debugging with legion done
821 if (curStaticInst && (!curStaticInst->isMicroop() ||
822 curStaticInst->isFirstMicroop()))
823 instCnt++;
824 advanceInst(fault);
825 }
826 } else if (curStaticInst) {
827 // non-memory instruction: execute completely now
828 Fault fault = curStaticInst->execute(this, traceData);
829
830 // keep an instruction count
831 if (fault == NoFault)
832 countInst();
833 else if (traceData && !DTRACE(ExecFaulting)) {
834 delete traceData;
835 traceData = NULL;
836 }
837
838 postExecute();
839 // @todo remove me after debugging with legion done
840 if (curStaticInst && (!curStaticInst->isMicroop() ||
841 curStaticInst->isFirstMicroop()))
842 instCnt++;
843 advanceInst(fault);
844 } else {
845 advanceInst(NoFault);
846 }
847
848 if (pkt) {
849 delete pkt->req;
850 delete pkt;
851 }
852}
853
854void
855TimingSimpleCPU::IcachePort::ITickEvent::process()
856{
857 cpu->completeIfetch(pkt);
858}
859
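// Timing responses from the icache are deferred to the next CPU clock edge
// via the port's tick event. Nacked packets are reinitialized and resent
// (falling back to IcacheRetry if the resend is blocked); snooped coherence
// requests are simply acknowledged.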
860bool
861TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
862{
863 if (pkt->isResponse() && !pkt->wasNacked()) {
864 // delay processing of returned data until next CPU clock edge
865 Tick next_tick = cpu->nextCycle(curTick);
865 Tick next_tick = cpu->nextCycle(curTick());
866
867 if (next_tick == curTick)
867 if (next_tick == curTick())
868 cpu->completeIfetch(pkt);
869 else
870 tickEvent.schedule(pkt, next_tick);
871
872 return true;
873 }
874 else if (pkt->wasNacked()) {
875 assert(cpu->_status == IcacheWaitResponse);
876 pkt->reinitNacked();
877 if (!sendTiming(pkt)) {
878 cpu->_status = IcacheRetry;
879 cpu->ifetch_pkt = pkt;
880 }
881 }
882 //Snooping a Coherence Request, do nothing
883 return true;
884}
885
886void
887TimingSimpleCPU::IcachePort::recvRetry()
888{
889 // we shouldn't get a retry unless we have a packet that we're
890 // waiting to transmit
891 assert(cpu->ifetch_pkt != NULL);
892 assert(cpu->_status == IcacheRetry);
893 PacketPtr tmp = cpu->ifetch_pkt;
894 if (sendTiming(tmp)) {
895 cpu->_status = IcacheWaitResponse;
896 cpu->ifetch_pkt = NULL;
897 }
898}
899
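// Dcache response handler. For split accesses each fragment response only
// decrements the outstanding count on the main packet; the instruction's
// completeAcc() runs once, when the final fragment has returned and pkt has
// been swapped for the reassembled main packet.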
900void
901TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
902{
903 // received a response from the dcache: complete the load or store
904 // instruction
905 assert(!pkt->isError());
906 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
907 pkt->req->getFlags().isSet(Request::NO_ACCESS));
908
909 numCycles += tickToCycles(curTick - previousTick);
910 previousTick = curTick;
909 numCycles += tickToCycles(curTick() - previousTick);
910 previousTick = curTick();
911
912 if (pkt->senderState) {
913 SplitFragmentSenderState * send_state =
914 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
915 assert(send_state);
916 delete pkt->req;
917 delete pkt;
918 PacketPtr big_pkt = send_state->bigPkt;
919 delete send_state;
920
921 SplitMainSenderState * main_send_state =
922 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
923 assert(main_send_state);
924 // Record the fact that this packet is no longer outstanding.
925 assert(main_send_state->outstanding != 0);
926 main_send_state->outstanding--;
927
928 if (main_send_state->outstanding) {
929 return;
930 } else {
931 delete main_send_state;
932 big_pkt->senderState = NULL;
933 pkt = big_pkt;
934 }
935 }
936
937 _status = Running;
938
939 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
940
941 // keep an instruction count
942 if (fault == NoFault)
943 countInst();
944 else if (traceData) {
945 // If there was a fault, we shouldn't trace this instruction.
946 delete traceData;
947 traceData = NULL;
948 }
949
950 // the locked flag may be cleared on the response packet, so check
951 // pkt->req and not pkt to see if it was a load-locked
952 if (pkt->isRead() && pkt->req->isLLSC()) {
953 TheISA::handleLockedRead(thread, pkt->req);
954 }
955
956 delete pkt->req;
957 delete pkt;
958
959 postExecute();
960
961 if (getState() == SimObject::Draining) {
962 advancePC(fault);
963 completeDrain();
964
965 return;
966 }
967
968 advanceInst(fault);
969}
970
971
972void
973TimingSimpleCPU::completeDrain()
974{
975 DPRINTF(Config, "Done draining\n");
976 changeState(SimObject::Drained);
977 drainEvent->process();
978}
979
980void
981TimingSimpleCPU::DcachePort::setPeer(Port *port)
982{
983 Port::setPeer(port);
984
985#if FULL_SYSTEM
986 // Update the ThreadContext's memory ports (Functional/Virtual
987 // Ports)
988 cpu->tcBase()->connectMemPorts(cpu->tcBase());
989#endif
990}
991
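// As with the icache port, dcache responses are deferred to the next CPU
// clock edge. If the tick event is already occupied (two split-fragment
// responses arriving in the same cycle), the second response is rejected for
// now and a retryEvent is scheduled so it can be resent at that edge.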
992bool
993TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
994{
995 if (pkt->isResponse() && !pkt->wasNacked()) {
996 // delay processing of returned data until next CPU clock edge
997 Tick next_tick = cpu->nextCycle(curTick);
997 Tick next_tick = cpu->nextCycle(curTick());
998
999 if (next_tick == curTick) {
999 if (next_tick == curTick()) {
1000 cpu->completeDataAccess(pkt);
1001 } else {
1002 if (!tickEvent.scheduled()) {
1003 tickEvent.schedule(pkt, next_tick);
1004 } else {
1005 // In the case of a split transaction and a cache that is
1006 // faster than a CPU we could get two responses before
1007 // next_tick expires
1008 if (!retryEvent.scheduled())
1009 schedule(retryEvent, next_tick);
1010 return false;
1011 }
1012 }
1013
1014 return true;
1015 }
1016 else if (pkt->wasNacked()) {
1017 assert(cpu->_status == DcacheWaitResponse);
1018 pkt->reinitNacked();
1019 if (!sendTiming(pkt)) {
1020 cpu->_status = DcacheRetry;
1021 cpu->dcache_pkt = pkt;
1022 }
1023 }
1024 //Snooping a Coherence Request, do nothing
1025 return true;
1026}
1027
1028void
1029TimingSimpleCPU::DcachePort::DTickEvent::process()
1030{
1031 cpu->completeDataAccess(pkt);
1032}
1033
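// The d-cache is ready again after blocking: resend the pending packet. If it
// was a fragment of a split access, a successful send clears it from the
// parent and immediately tries to issue the other pending fragment.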
1034void
1035TimingSimpleCPU::DcachePort::recvRetry()
1036{
1037 // we shouldn't get a retry unless we have a packet that we're
1038 // waiting to transmit
1039 assert(cpu->dcache_pkt != NULL);
1040 assert(cpu->_status == DcacheRetry);
1041 PacketPtr tmp = cpu->dcache_pkt;
1042 if (tmp->senderState) {
1043 // This is a packet from a split access.
1044 SplitFragmentSenderState * send_state =
1045 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1046 assert(send_state);
1047 PacketPtr big_pkt = send_state->bigPkt;
1048
1049 SplitMainSenderState * main_send_state =
1050 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1051 assert(main_send_state);
1052
1053 if (sendTiming(tmp)) {
1054 // If we were able to send without retrying, record that fact
1055 // and try sending the other fragment.
1056 send_state->clearFromParent();
1057 int other_index = main_send_state->getPendingFragment();
1058 if (other_index > 0) {
1059 tmp = main_send_state->fragments[other_index];
1060 cpu->dcache_pkt = tmp;
1061 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1062 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1063 main_send_state->fragments[other_index] = NULL;
1064 }
1065 } else {
1066 cpu->_status = DcacheWaitResponse;
1067 // memory system takes ownership of packet
1068 cpu->dcache_pkt = NULL;
1069 }
1070 }
1071 } else if (sendTiming(tmp)) {
1072 cpu->_status = DcacheWaitResponse;
1073 // memory system takes ownership of packet
1074 cpu->dcache_pkt = NULL;
1075 }
1076}
1077
1078TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
1079 Tick t)
1080 : pkt(_pkt), cpu(_cpu)
1081{
1082 cpu->schedule(this, t);
1083}
1084
1085void
1086TimingSimpleCPU::IprEvent::process()
1087{
1088 cpu->completeDataAccess(pkt);
1089}
1090
1091const char *
1092TimingSimpleCPU::IprEvent::description() const
1093{
1094 return "Timing Simple CPU Delay IPR event";
1095}
1096
1097
1098void
1099TimingSimpleCPU::printAddr(Addr a)
1100{
1101 dcachePort.printAddr(a);
1102}
1103
1104
1105////////////////////////////////////////////////////////////////////////
1106//
1107// TimingSimpleCPU Simulation Object
1108//
1109TimingSimpleCPU *
1110TimingSimpleCPUParams::create()
1111{
1112 numThreads = 1;
1113#if !FULL_SYSTEM
1114 if (workload.size() != 1)
1115 panic("only one workload allowed");
1116#endif
1117 return new TimingSimpleCPU(this);
1118}