timing.cc (8443:530ff1bc8d70 → 8444:56de1f9320df): readBytes/writeBytes renamed to readMem/writeMem
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/timing.hh"
#include "cpu/exetrace.hh"
#include "debug/Config.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, _cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick();
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
      dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
    system->totalNumInsts = 0;
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick() - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    previousTick = curTick();
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

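// Issue a timing read request to the data cache (or handle it as a
// memory-mapped IPR access).  Returns true if the packet was accepted or
// completed, false if the cache asked us to retry later.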
bool
TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(pkt)) {
        _status = DcacheRetry;
        dcache_pkt = pkt;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

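// Once translation is complete, build the packet for a non-split access
// and hand it to the cache: reads go straight out, while writes first
// check LL/SC and conditional-swap conditions to decide whether the cache
// access should happen at all.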
void
TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                          bool read)
{
    PacketPtr pkt;
    buildPacket(pkt, req, read);
    pkt->dataDynamicArray<uint8_t>(data);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt->makeResponse();
        completeDataAccess(pkt);
    } else if (read) {
        handleReadPacket(pkt);
    } else {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLLSC()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isCondSwap()) {
            assert(res);
            req->setExtraData(*res);
        }

        if (do_access) {
            dcache_pkt = pkt;
            handleWritePacket();
        } else {
            _status = DcacheWaitResponse;
            completeDataAccess(pkt);
        }
    }
}

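// Like sendData(), but for an access that straddles a cache-line boundary
// and has been split into two fragment requests.  Each fragment is sent
// separately; a fragment that cannot be sent yet stays attached to its
// parent so the retry path can find it later.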
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}

void
TimingSimpleCPU::translationFault(Fault fault)
{
    // fault may be NoFault in cases where a fault is suppressed,
    // for instance prefetches.
    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (traceData) {
        // Since there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();
    } else {
        advanceInst(fault);
    }
}

void
TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
{
    MemCmd cmd;
    if (read) {
        cmd = MemCmd::ReadReq;
        if (req->isLLSC())
            cmd = MemCmd::LoadLockedReq;
    } else {
        cmd = MemCmd::WriteReq;
        if (req->isLLSC()) {
            cmd = MemCmd::StoreCondReq;
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
        }
    }
    pkt = new Packet(req, cmd, Packet::Broadcast);
}

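// Build the two fragment packets for a split access plus an overarching
// "main" packet that owns the data buffer; the sender-state objects link
// the fragments back to the main packet so the responses can be merged.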
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
        RequestPtr req1, RequestPtr req2, RequestPtr req,
        uint8_t *data, bool read)
{
    pkt1 = pkt2 = NULL;

    assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());

    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        buildPacket(pkt1, req, read);
        return;
    }

    buildPacket(pkt1, req1, read);
    buildPacket(pkt2, req2, read);

    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
    PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
                               Packet::Broadcast);

    pkt->dataDynamicArray<uint8_t>(data);
    pkt1->dataStatic<uint8_t>(data);
    pkt2->dataStatic<uint8_t>(data + req1->getSize());

    SplitMainSenderState * main_send_state = new SplitMainSenderState;
    pkt->senderState = main_send_state;
    main_send_state->fragments[0] = pkt1;
    main_send_state->fragments[1] = pkt2;
    main_send_state->outstanding = 2;
    pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
    pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
}

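// Initiate a timing-mode load: allocate a request for the virtual address,
// split it if it crosses a cache-line boundary, and kick off the (possibly
// split) data TLB translation.  The access itself is issued from
// finishTranslation(); translation faults are reported from there, so this
// always returns NoFault.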
Fault
TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    Fault fault;
    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Read;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, new uint8_t[size],
                                      NULL, mode);
        DataTranslation<TimingSimpleCPU> *trans1 =
            new DataTranslation<TimingSimpleCPU>(this, state, 0);
        DataTranslation<TimingSimpleCPU> *trans2 =
            new DataTranslation<TimingSimpleCPU>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, new uint8_t[size], NULL, mode);
        DataTranslation<TimingSimpleCPU> *translation
            = new DataTranslation<TimingSimpleCPU>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    return NoFault;
}

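// Issue the timing write currently held in dcache_pkt (or handle it as a
// memory-mapped IPR access).  Returns true if the packet has been handed
// off, false if the cache asked us to retry.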
bool
TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
        Tick delay;
        delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
    } else if (!dcachePort.sendTiming(dcache_pkt)) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
        // memory system takes ownership of packet
        dcache_pkt = NULL;
    }
    return dcache_pkt == NULL;
}

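// Initiate a timing-mode store: copy the data into a buffer owned by the
// translation state, allocate (and possibly split) the request, and start
// the data TLB translation.  The write is sent to the cache from
// finishTranslation(); translation faults are reported from there.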
Fault
TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *newData = new uint8_t[size];
    memcpy(newData, data, size);

    const int asid = 0;
    const ThreadID tid = 0;
    const Addr pc = thread->instAddr();
    unsigned block_size = dcachePort.peerBlockSize();
    BaseTLB::Mode mode = BaseTLB::Write;

    if (traceData) {
        traceData->setAddr(addr);
    }

    RequestPtr req = new Request(asid, addr, size,
                                 flags, pc, _cpuId, tid);

    Addr split_addr = roundDown(addr + size - 1, block_size);
    assert(split_addr <= addr || split_addr - addr < block_size);

    _status = DTBWaitResponse;
    if (split_addr > addr) {
        RequestPtr req1, req2;
        assert(!req->isLLSC() && !req->isSwap());
        req->splitOnVaddr(split_addr, req1, req2);

        WholeTranslationState *state =
            new WholeTranslationState(req, req1, req2, newData, res, mode);
        DataTranslation<TimingSimpleCPU> *trans1 =
            new DataTranslation<TimingSimpleCPU>(this, state, 0);
        DataTranslation<TimingSimpleCPU> *trans2 =
            new DataTranslation<TimingSimpleCPU>(this, state, 1);

        thread->dtb->translateTiming(req1, tc, trans1, mode);
        thread->dtb->translateTiming(req2, tc, trans2, mode);
    } else {
        WholeTranslationState *state =
            new WholeTranslationState(req, newData, res, mode);
        DataTranslation<TimingSimpleCPU> *translation =
            new DataTranslation<TimingSimpleCPU>(this, state);
        thread->dtb->translateTiming(req, tc, translation, mode);
    }

    // Translation faults will be returned via finishTranslation()
    return NoFault;
}


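// Callback invoked once all pieces of a data access have been translated:
// on a fault the buffer and requests are released and the fault is
// handled, otherwise the access is sent to the cache as a single or split
// transaction.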
void
TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
{
    _status = Running;

    if (state->getFault() != NoFault) {
        if (state->isPrefetch()) {
            state->setNoFault();
        }
        delete [] state->data;
        state->deleteReqs();
        translationFault(state->getFault());
    } else {
        if (!state->isSplit) {
            sendData(state->mainReq, state->data, state->res,
                     state->mode == BaseTLB::Read);
        } else {
            sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
                          state->data, state->mode == BaseTLB::Read);
        }
    }

    delete state;
}


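// Start fetching the next instruction: if a new memory fetch is needed,
// begin the ITLB translation (the icache request is sent from sendFetch()
// once translation completes); microcode fetched from the ROM or an active
// macro-op is handled without touching the icache.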
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    // We must have just got suspended by a PC event
    if (_status == Idle)
        return;

    TheISA::PCState pcState = thread->pcState();
    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;

    if (needToFetch) {
        _status = Running;
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        setupFetchRequest(ifetch_req);
        DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
                                     BaseTLB::Execute);
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);

        numCycles += tickToCycles(curTick() - previousTick);
        previousTick = curTick();
    }
}


void
TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
{
    if (fault == NoFault) {
        DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
                req->getVaddr(), req->getPaddr());
        ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);
        DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());

        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
        delete req;
        // fetch fault: advance directly to next instruction (fault handler)
        _status = Running;
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();
}


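// Advance to the next instruction once the current one has finished (or
// faulted).  A fault reschedules the fetch event for the next cycle and
// parks the CPU in the Faulting state; otherwise the PC is advanced and
// the next fetch is started if the CPU is still Running.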
void
TimingSimpleCPU::advanceInst(Fault fault)
{

    if (_status == Faulting)
        return;

    if (fault != NoFault) {
        advancePC(fault);
        DPRINTF(SimpleCPU, "Fault occurred, scheduling fetch event\n");
        reschedule(fetchEvent, nextCycle(), true);
        _status = Faulting;
        return;
    }


    if (!stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick())
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    } else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

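// Callback for a data-cache response: for split accesses, wait until both
// fragments have returned and then complete the original "big" packet;
// then let the instruction finish (completeAcc) and advance.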
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += tickToCycles(curTick() - previousTick);
    previousTick = curTick();

    if (pkt->senderState) {
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick());

        if (next_tick == curTick()) {
            cpu->completeDataAccess(pkt);
        } else {
            if (!tickEvent.scheduled()) {
                tickEvent.schedule(pkt, next_tick);
            } else {
                // In the case of a split transaction and a cache that is
                // faster than a CPU we could get two responses before
                // next_tick expires
                if (!retryEvent.scheduled())
                    schedule(retryEvent, next_tick);
                return false;
            }
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

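// The cache previously refused a packet and is now ready again: resend the
// pending dcache packet, taking care to send the second fragment of a
// split access if the first one goes through.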
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
// TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}