timing.cc (7911:267e1e16e51b)
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43#include "arch/locked_mem.hh"
44#include "arch/mmaped_ipr.hh"
45#include "arch/utility.hh"
46#include "base/bigint.hh"
47#include "config/the_isa.hh"
48#include "cpu/exetrace.hh"
49#include "cpu/simple/timing.hh"
50#include "mem/packet.hh"
51#include "mem/packet_access.hh"
52#include "params/TimingSimpleCPU.hh"
53#include "sim/faults.hh"
54#include "sim/system.hh"
55
56using namespace std;
57using namespace TheISA;
58
59Port *
60TimingSimpleCPU::getPort(const std::string &if_name, int idx)
61{
62 if (if_name == "dcache_port")
63 return &dcachePort;
64 else if (if_name == "icache_port")
65 return &icachePort;
66 else
67 panic("No Such Port\n");
68}
69
70void
71TimingSimpleCPU::init()
72{
73 BaseCPU::init();
74#if FULL_SYSTEM
75 for (int i = 0; i < threadContexts.size(); ++i) {
76 ThreadContext *tc = threadContexts[i];
77
78 // initialize CPU, including PC
79 TheISA::initCPU(tc, _cpuId);
80 }
81#endif
82}
83
84Tick
85TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
86{
87 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
88 return curTick();
89}
90
91void
92TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
93{
94 // No internal storage to update, just return
95 return;
96}
97
98void
99TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
100{
101 if (status == RangeChange) {
102 if (!snoopRangeSent) {
103 snoopRangeSent = true;
104 sendStatusChange(Port::RangeChange);
105 }
106 return;
107 }
108
109 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
110}
111
112
113void
114TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
115{
116 pkt = _pkt;
117 cpu->schedule(this, t);
118}
119
120TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
121 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this, p->clock),
122 dcachePort(this, p->clock), fetchEvent(this)
123{
124 _status = Idle;
125
126 icachePort.snoopRangeSent = false;
127 dcachePort.snoopRangeSent = false;
128
129 ifetch_pkt = dcache_pkt = NULL;
130 drainEvent = NULL;
131 previousTick = 0;
132 changeState(SimObject::Running);
133 system->totalNumInsts = 0;
134}
135
136
137TimingSimpleCPU::~TimingSimpleCPU()
138{
139}
140
141void
142TimingSimpleCPU::serialize(ostream &os)
143{
144 SimObject::State so_state = SimObject::getState();
145 SERIALIZE_ENUM(so_state);
146 BaseSimpleCPU::serialize(os);
147}
148
149void
150TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
151{
152 SimObject::State so_state;
153 UNSERIALIZE_ENUM(so_state);
154 BaseSimpleCPU::unserialize(cp, section);
155}
156
157unsigned int
158TimingSimpleCPU::drain(Event *drain_event)
159{
160 // TimingSimpleCPU is ready to drain if it's not waiting for
161 // an access to complete.
162 if (_status == Idle || _status == Running || _status == SwitchedOut) {
163 changeState(SimObject::Drained);
164 return 0;
165 } else {
166 changeState(SimObject::Draining);
167 drainEvent = drain_event;
168 return 1;
169 }
170}
171
172void
173TimingSimpleCPU::resume()
174{
175 DPRINTF(SimpleCPU, "Resume\n");
176 if (_status != SwitchedOut && _status != Idle) {
177 assert(system->getMemoryMode() == Enums::timing);
178
179 if (fetchEvent.scheduled())
180 deschedule(fetchEvent);
181
182 schedule(fetchEvent, nextCycle());
183 }
184
185 changeState(SimObject::Running);
186}
187
188void
189TimingSimpleCPU::switchOut()
190{
191 assert(_status == Running || _status == Idle);
192 _status = SwitchedOut;
193 numCycles += tickToCycles(curTick() - previousTick);
194
195 // If we've been scheduled to resume but are then told to switch out,
196 // we'll need to cancel it.
197 if (fetchEvent.scheduled())
198 deschedule(fetchEvent);
199}
200
201
202void
203TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
204{
205 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
206
207 // if any of this CPU's ThreadContexts are active, mark the CPU as
208 // running and schedule its tick event.
209 for (int i = 0; i < threadContexts.size(); ++i) {
210 ThreadContext *tc = threadContexts[i];
211 if (tc->status() == ThreadContext::Active && _status != Running) {
212 _status = Running;
213 break;
214 }
215 }
216
217 if (_status != Running) {
218 _status = Idle;
219 }
220 assert(threadContexts.size() == 1);
221 previousTick = curTick();
222}
223
224
225void
226TimingSimpleCPU::activateContext(int thread_num, int delay)
227{
228 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
229
230 assert(thread_num == 0);
231 assert(thread);
232
233 assert(_status == Idle);
234
235 notIdleFraction++;
236 _status = Running;
237
238 // kick things off by initiating the fetch of the next instruction
239 schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
240}
241
242
243void
244TimingSimpleCPU::suspendContext(int thread_num)
245{
246 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
247
248 assert(thread_num == 0);
249 assert(thread);
250
251 if (_status == Idle)
252 return;
253
254 assert(_status == Running);
255
256 // just change status to Idle... if status != Running,
257 // completeInst() will not initiate fetch of next instruction.
258
259 notIdleFraction--;
260 _status = Idle;
261}
262
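// Issue a timing-mode read to the data port. Memory-mapped IPR accesses are
// handled locally and completed via a scheduled IprEvent; otherwise the packet
// is sent to the cache, with a retry recorded if it is rejected. Returns true
// once the memory system owns the packet (dcache_pkt has been cleared).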
263bool
264TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
265{
266 RequestPtr req = pkt->req;
267 if (req->isMmapedIpr()) {
268 Tick delay;
269 delay = TheISA::handleIprRead(thread->getTC(), pkt);
270 new IprEvent(pkt, this, nextCycle(curTick() + delay));
271 _status = DcacheWaitResponse;
272 dcache_pkt = NULL;
273 } else if (!dcachePort.sendTiming(pkt)) {
274 _status = DcacheRetry;
275 dcache_pkt = pkt;
276 } else {
277 _status = DcacheWaitResponse;
278 // memory system takes ownership of packet
279 dcache_pkt = NULL;
280 }
281 return dcache_pkt == NULL;
282}
283
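// Called after translation completes for a single (non-split) access: build
// the packet, attach the data buffer, and either complete the access
// immediately (NO_ACCESS requests), issue a read, or apply the LL/SC and
// conditional-swap checks before issuing a write.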
284void
285TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
286 bool read)
287{
288 PacketPtr pkt;
289 buildPacket(pkt, req, read);
290 pkt->dataDynamicArray<uint8_t>(data);
291 if (req->getFlags().isSet(Request::NO_ACCESS)) {
292 assert(!dcache_pkt);
293 pkt->makeResponse();
294 completeDataAccess(pkt);
295 } else if (read) {
296 handleReadPacket(pkt);
297 } else {
298 bool do_access = true; // set to false to suppress the cache access
299
300 if (req->isLLSC()) {
301 do_access = TheISA::handleLockedWrite(thread, req);
302 } else if (req->isCondSwap()) {
303 assert(res);
304 req->setExtraData(*res);
305 }
306
307 if (do_access) {
308 dcache_pkt = pkt;
309 handleWritePacket();
310 } else {
311 _status = DcacheWaitResponse;
312 completeDataAccess(pkt);
313 }
314 }
315}
316
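// Same as sendData(), but for an access that straddles a cache block boundary
// and has been split into two requests. Each fragment that the cache accepts
// is cleared from the parent packet's sender-state bookkeeping.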
317void
318TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
319 RequestPtr req, uint8_t *data, bool read)
320{
321 PacketPtr pkt1, pkt2;
322 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
323 if (req->getFlags().isSet(Request::NO_ACCESS)) {
324 assert(!dcache_pkt);
325 pkt1->makeResponse();
326 completeDataAccess(pkt1);
327 } else if (read) {
328 SplitFragmentSenderState * send_state =
329 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
330 if (handleReadPacket(pkt1)) {
331 send_state->clearFromParent();
332 send_state = dynamic_cast<SplitFragmentSenderState *>(
333 pkt2->senderState);
334 if (handleReadPacket(pkt2)) {
335 send_state->clearFromParent();
336 }
337 }
338 } else {
339 dcache_pkt = pkt1;
340 SplitFragmentSenderState * send_state =
341 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
342 if (handleWritePacket()) {
343 send_state->clearFromParent();
344 dcache_pkt = pkt2;
345 send_state = dynamic_cast<SplitFragmentSenderState *>(
346 pkt2->senderState);
347 if (handleWritePacket()) {
348 send_state->clearFromParent();
349 }
350 }
351 }
352}
353
354void
355TimingSimpleCPU::translationFault(Fault fault)
356{
357 // fault may be NoFault in cases where a fault is suppressed,
358 // for instance prefetches.
359 numCycles += tickToCycles(curTick() - previousTick);
360 previousTick = curTick();
361
362 if (traceData) {
363 // Since there was a fault, we shouldn't trace this instruction.
364 delete traceData;
365 traceData = NULL;
366 }
367
368 postExecute();
369
370 if (getState() == SimObject::Draining) {
371 advancePC(fault);
372 completeDrain();
373 } else {
374 advanceInst(fault);
375 }
376}
377
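// Choose the memory command (read, load-locked, write, store-conditional or
// swap) based on the request flags and wrap the request in a packet.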
378void
379TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
380{
381 MemCmd cmd;
382 if (read) {
383 cmd = MemCmd::ReadReq;
384 if (req->isLLSC())
385 cmd = MemCmd::LoadLockedReq;
386 } else {
387 cmd = MemCmd::WriteReq;
388 if (req->isLLSC()) {
389 cmd = MemCmd::StoreCondReq;
390 } else if (req->isSwap()) {
391 cmd = MemCmd::SwapReq;
392 }
393 }
394 pkt = new Packet(req, cmd, Packet::Broadcast);
395}
396
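// Build the two fragment packets for a split access plus a "big" parent packet
// that carries the full data buffer. The fragments reference slices of that
// buffer and point back to the parent through SplitFragmentSenderState so that
// completeDataAccess() can reassemble the access when both responses return.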
397void
398TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
399 RequestPtr req1, RequestPtr req2, RequestPtr req,
400 uint8_t *data, bool read)
401{
402 pkt1 = pkt2 = NULL;
403
404 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr());
405
406 if (req->getFlags().isSet(Request::NO_ACCESS)) {
407 buildPacket(pkt1, req, read);
408 return;
409 }
410
411 buildPacket(pkt1, req1, read);
412 buildPacket(pkt2, req2, read);
413
414 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags());
415 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(),
416 Packet::Broadcast);
417
418 pkt->dataDynamicArray<uint8_t>(data);
419 pkt1->dataStatic<uint8_t>(data);
420 pkt2->dataStatic<uint8_t>(data + req1->getSize());
421
422 SplitMainSenderState * main_send_state = new SplitMainSenderState;
423 pkt->senderState = main_send_state;
424 main_send_state->fragments[0] = pkt1;
425 main_send_state->fragments[1] = pkt2;
426 main_send_state->outstanding = 2;
427 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
428 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
429}
430
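// Start a timing read: construct the request, split it on the cache block
// boundary if necessary, and kick off the (possibly split) data TLB
// translation. The access itself is issued from finishTranslation().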
431Fault
432TimingSimpleCPU::readBytes(Addr addr, uint8_t *data,
433 unsigned size, unsigned flags)
434{
435 Fault fault;
436 const int asid = 0;
437 const ThreadID tid = 0;
438 const Addr pc = thread->instAddr();
439 unsigned block_size = dcachePort.peerBlockSize();
440 BaseTLB::Mode mode = BaseTLB::Read;
441
442 if (traceData) {
443 traceData->setAddr(addr);
444 }
445
446 RequestPtr req = new Request(asid, addr, size,
447 flags, pc, _cpuId, tid);
448
449 Addr split_addr = roundDown(addr + size - 1, block_size);
450 assert(split_addr <= addr || split_addr - addr < block_size);
451
452 _status = DTBWaitResponse;
453 if (split_addr > addr) {
454 RequestPtr req1, req2;
455 assert(!req->isLLSC() && !req->isSwap());
456 req->splitOnVaddr(split_addr, req1, req2);
457
458 WholeTranslationState *state =
459 new WholeTranslationState(req, req1, req2, new uint8_t[size],
460 NULL, mode);
461 DataTranslation<TimingSimpleCPU> *trans1 =
462 new DataTranslation<TimingSimpleCPU>(this, state, 0);
463 DataTranslation<TimingSimpleCPU> *trans2 =
464 new DataTranslation<TimingSimpleCPU>(this, state, 1);
465
466 thread->dtb->translateTiming(req1, tc, trans1, mode);
467 thread->dtb->translateTiming(req2, tc, trans2, mode);
468 } else {
469 WholeTranslationState *state =
470 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
471 DataTranslation<TimingSimpleCPU> *translation
472 = new DataTranslation<TimingSimpleCPU>(this, state);
473 thread->dtb->translateTiming(req, tc, translation, mode);
474 }
475
476 return NoFault;
477}
478
479template <class T>
480Fault
481TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
482{
483 return readBytes(addr, (uint8_t *)&data, sizeof(T), flags);
484}
485
486#ifndef DOXYGEN_SHOULD_SKIP_THIS
487
488template
489Fault
490TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
491
492template
493Fault
494TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
495
496template
497Fault
498TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
499
500template
501Fault
502TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
503
504template
505Fault
506TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
507
508template
509Fault
510TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
511
512#endif //DOXYGEN_SHOULD_SKIP_THIS
513
514template<>
515Fault
516TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
517{
518 return read(addr, *(uint64_t*)&data, flags);
519}
520
521template<>
522Fault
523TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
524{
525 return read(addr, *(uint32_t*)&data, flags);
526}
527
528template<>
529Fault
530TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
531{
532 return read(addr, (uint32_t&)data, flags);
533}
534
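// Counterpart of handleReadPacket() for stores: send dcache_pkt to the data
// port (or handle it as a memory-mapped IPR write) and return true once the
// memory system has taken ownership of the packet.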
535bool
536TimingSimpleCPU::handleWritePacket()
537{
538 RequestPtr req = dcache_pkt->req;
539 if (req->isMmapedIpr()) {
540 Tick delay;
541 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
542 new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
543 _status = DcacheWaitResponse;
544 dcache_pkt = NULL;
545 } else if (!dcachePort.sendTiming(dcache_pkt)) {
546 _status = DcacheRetry;
547 } else {
548 _status = DcacheWaitResponse;
549 // memory system takes ownership of packet
550 dcache_pkt = NULL;
551 }
552 return dcache_pkt == NULL;
553}
554
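// Start a timing write; the data buffer passed in is handed off to the
// translation machinery (writeBytes() and write<T>() allocate and copy into
// such a buffer first). As with reads, the request may be split on a block
// boundary and the access is issued from finishTranslation().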
555Fault
556TimingSimpleCPU::writeTheseBytes(uint8_t *data, unsigned size,
557 Addr addr, unsigned flags, uint64_t *res)
558{
559 const int asid = 0;
560 const ThreadID tid = 0;
561 const Addr pc = thread->instAddr();
562 unsigned block_size = dcachePort.peerBlockSize();
563 BaseTLB::Mode mode = BaseTLB::Write;
564
565 if (traceData) {
566 traceData->setAddr(addr);
567 }
568
569 RequestPtr req = new Request(asid, addr, size,
570 flags, pc, _cpuId, tid);
571
572 Addr split_addr = roundDown(addr + size - 1, block_size);
573 assert(split_addr <= addr || split_addr - addr < block_size);
574
575 _status = DTBWaitResponse;
576 if (split_addr > addr) {
577 RequestPtr req1, req2;
578 assert(!req->isLLSC() && !req->isSwap());
579 req->splitOnVaddr(split_addr, req1, req2);
580
581 WholeTranslationState *state =
582 new WholeTranslationState(req, req1, req2, data, res, mode);
583 DataTranslation<TimingSimpleCPU> *trans1 =
584 new DataTranslation<TimingSimpleCPU>(this, state, 0);
585 DataTranslation<TimingSimpleCPU> *trans2 =
586 new DataTranslation<TimingSimpleCPU>(this, state, 1);
587
588 thread->dtb->translateTiming(req1, tc, trans1, mode);
589 thread->dtb->translateTiming(req2, tc, trans2, mode);
590 } else {
591 WholeTranslationState *state =
592 new WholeTranslationState(req, data, res, mode);
593 DataTranslation<TimingSimpleCPU> *translation =
594 new DataTranslation<TimingSimpleCPU>(this, state);
595 thread->dtb->translateTiming(req, tc, translation, mode);
596 }
597
598 // Translation faults will be returned via finishTranslation()
599 return NoFault;
600}
601
602Fault
603TimingSimpleCPU::writeBytes(uint8_t *data, unsigned size,
604 Addr addr, unsigned flags, uint64_t *res)
605{
606 uint8_t *newData = new uint8_t[size];
607 memcpy(newData, data, size);
608 return writeTheseBytes(newData, size, addr, flags, res);
609}
610
611template <class T>
612Fault
613TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
614{
615 if (traceData) {
616 traceData->setData(data);
617 }
618 T *dataP = (T*) new uint8_t[sizeof(T)];
619 *dataP = TheISA::htog(data);
620
621 return writeTheseBytes((uint8_t *)dataP, sizeof(T), addr, flags, res);
622}
623
624
625#ifndef DOXYGEN_SHOULD_SKIP_THIS
626template
627Fault
628TimingSimpleCPU::write(Twin32_t data, Addr addr,
629 unsigned flags, uint64_t *res);
630
631template
632Fault
633TimingSimpleCPU::write(Twin64_t data, Addr addr,
634 unsigned flags, uint64_t *res);
635
636template
637Fault
638TimingSimpleCPU::write(uint64_t data, Addr addr,
639 unsigned flags, uint64_t *res);
640
641template
642Fault
643TimingSimpleCPU::write(uint32_t data, Addr addr,
644 unsigned flags, uint64_t *res);
645
646template
647Fault
648TimingSimpleCPU::write(uint16_t data, Addr addr,
649 unsigned flags, uint64_t *res);
650
651template
652Fault
653TimingSimpleCPU::write(uint8_t data, Addr addr,
654 unsigned flags, uint64_t *res);
655
656#endif //DOXYGEN_SHOULD_SKIP_THIS
657
658template<>
659Fault
660TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
661{
662 return write(*(uint64_t*)&data, addr, flags, res);
663}
664
665template<>
666Fault
667TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
668{
669 return write(*(uint32_t*)&data, addr, flags, res);
670}
671
672
673template<>
674Fault
675TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
676{
677 return write((uint32_t)data, addr, flags, res);
678}
679
680
681void
682TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
683{
684 _status = Running;
685
686 if (state->getFault() != NoFault) {
687 if (state->isPrefetch()) {
688 state->setNoFault();
689 }
690 delete [] state->data;
691 state->deleteReqs();
692 translationFault(state->getFault());
693 } else {
694 if (!state->isSplit) {
695 sendData(state->mainReq, state->data, state->res,
696 state->mode == BaseTLB::Read);
697 } else {
698 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
699 state->data, state->mode == BaseTLB::Read);
700 }
701 }
702
703 delete state;
704}
705
706
707void
708TimingSimpleCPU::fetch()
709{
710 DPRINTF(SimpleCPU, "Fetch\n");
711
712 if (!curStaticInst || !curStaticInst->isDelayedCommit())
713 checkForInterrupts();
714
715 checkPcEventQueue();
716
717 TheISA::PCState pcState = thread->pcState();
718 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
719
720 if (needToFetch) {
721 Request *ifetch_req = new Request();
722 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
723 setupFetchRequest(ifetch_req);
724 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
725 BaseTLB::Execute);
726 } else {
727 _status = IcacheWaitResponse;
728 completeIfetch(NULL);
729
730 numCycles += tickToCycles(curTick() - previousTick);
731 previousTick = curTick();
732 }
733}
734
735
736void
737TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
738{
739 if (fault == NoFault) {
740 ifetch_pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
741 ifetch_pkt->dataStatic(&inst);
742
743 if (!icachePort.sendTiming(ifetch_pkt)) {
744 // Need to wait for retry
745 _status = IcacheRetry;
746 } else {
747 // Need to wait for cache to respond
748 _status = IcacheWaitResponse;
749 // ownership of packet transferred to memory system
750 ifetch_pkt = NULL;
751 }
752 } else {
753 delete req;
754 // fetch fault: advance directly to next instruction (fault handler)
755 advanceInst(fault);
756 }
757
758 numCycles += tickToCycles(curTick() - previousTick);
759 previousTick = curTick();
760}
761
762
763void
764TimingSimpleCPU::advanceInst(Fault fault)
765{
766 if (fault != NoFault || !stayAtPC)
767 advancePC(fault);
768
769 if (_status == Running) {
770 // kick off fetch of next instruction... callback from icache
771 // response will cause that instruction to be executed,
772 // keeping the CPU running.
773 fetch();
774 }
775}
776
777
778void
779TimingSimpleCPU::completeIfetch(PacketPtr pkt)
780{
781 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
782
783 // received a response from the icache: execute the received
784 // instruction
785
786 assert(!pkt || !pkt->isError());
787 assert(_status == IcacheWaitResponse);
788
789 _status = Running;
790
791 numCycles += tickToCycles(curTick() - previousTick);
792 previousTick = curTick();
793
794 if (getState() == SimObject::Draining) {
795 if (pkt) {
796 delete pkt->req;
797 delete pkt;
798 }
799
800 completeDrain();
801 return;
802 }
803
804 preExecute();
805 if (curStaticInst && curStaticInst->isMemRef()) {
806 // load or store: just send to dcache
807 Fault fault = curStaticInst->initiateAcc(this, traceData);
808 if (_status != Running) {
809 // instruction will complete in dcache response callback
810 assert(_status == DcacheWaitResponse ||
811 _status == DcacheRetry || _status == DTBWaitResponse);
812 assert(fault == NoFault);
813 } else {
814 if (fault != NoFault && traceData) {
815 // If there was a fault, we shouldn't trace this instruction.
816 delete traceData;
817 traceData = NULL;
818 }
819
820 postExecute();
821 // @todo remove me after debugging with legion done
822 if (curStaticInst && (!curStaticInst->isMicroop() ||
823 curStaticInst->isFirstMicroop()))
824 instCnt++;
825 advanceInst(fault);
826 }
827 } else if (curStaticInst) {
828 // non-memory instruction: execute completely now
829 Fault fault = curStaticInst->execute(this, traceData);
830
831 // keep an instruction count
832 if (fault == NoFault)
833 countInst();
834 else if (traceData && !DTRACE(ExecFaulting)) {
835 delete traceData;
836 traceData = NULL;
837 }
838
839 postExecute();
840 // @todo remove me after debugging with legion done
841 if (curStaticInst && (!curStaticInst->isMicroop() ||
842 curStaticInst->isFirstMicroop()))
843 instCnt++;
844 advanceInst(fault);
845 } else {
846 advanceInst(NoFault);
847 }
848
849 if (pkt) {
850 delete pkt->req;
851 delete pkt;
852 }
853}
854
855void
856TimingSimpleCPU::IcachePort::ITickEvent::process()
857{
858 cpu->completeIfetch(pkt);
859}
860
861bool
862TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
863{
864 if (pkt->isResponse() && !pkt->wasNacked()) {
865 // delay processing of returned data until next CPU clock edge
866 Tick next_tick = cpu->nextCycle(curTick());
867
868 if (next_tick == curTick())
869 cpu->completeIfetch(pkt);
870 else
871 tickEvent.schedule(pkt, next_tick);
872
873 return true;
874 }
875 else if (pkt->wasNacked()) {
876 assert(cpu->_status == IcacheWaitResponse);
877 pkt->reinitNacked();
878 if (!sendTiming(pkt)) {
879 cpu->_status = IcacheRetry;
880 cpu->ifetch_pkt = pkt;
881 }
882 }
883 //Snooping a Coherence Request, do nothing
884 return true;
885}
886
887void
888TimingSimpleCPU::IcachePort::recvRetry()
889{
890 // we shouldn't get a retry unless we have a packet that we're
891 // waiting to transmit
892 assert(cpu->ifetch_pkt != NULL);
893 assert(cpu->_status == IcacheRetry);
894 PacketPtr tmp = cpu->ifetch_pkt;
895 if (sendTiming(tmp)) {
896 cpu->_status = IcacheWaitResponse;
897 cpu->ifetch_pkt = NULL;
898 }
899}
900
901void
902TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
903{
904 // received a response from the dcache: complete the load or store
905 // instruction
906 assert(!pkt->isError());
907 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
908 pkt->req->getFlags().isSet(Request::NO_ACCESS));
909
910 numCycles += tickToCycles(curTick() - previousTick);
911 previousTick = curTick();
912
913 if (pkt->senderState) {
914 SplitFragmentSenderState * send_state =
915 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
916 assert(send_state);
917 delete pkt->req;
918 delete pkt;
919 PacketPtr big_pkt = send_state->bigPkt;
920 delete send_state;
921
922 SplitMainSenderState * main_send_state =
923 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
924 assert(main_send_state);
925 // Record the fact that this packet is no longer outstanding.
926 assert(main_send_state->outstanding != 0);
927 main_send_state->outstanding--;
928
929 if (main_send_state->outstanding) {
930 return;
931 } else {
932 delete main_send_state;
933 big_pkt->senderState = NULL;
934 pkt = big_pkt;
935 }
936 }
937
938 _status = Running;
939
940 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
941
942 // keep an instruction count
943 if (fault == NoFault)
944 countInst();
945 else if (traceData) {
946 // If there was a fault, we shouldn't trace this instruction.
947 delete traceData;
948 traceData = NULL;
949 }
950
951 // the locked flag may be cleared on the response packet, so check
952 // pkt->req and not pkt to see if it was a load-locked
953 if (pkt->isRead() && pkt->req->isLLSC()) {
954 TheISA::handleLockedRead(thread, pkt->req);
955 }
956
957 delete pkt->req;
958 delete pkt;
959
960 postExecute();
961
962 if (getState() == SimObject::Draining) {
963 advancePC(fault);
964 completeDrain();
965
966 return;
967 }
968
969 advanceInst(fault);
970}
971
972
973void
974TimingSimpleCPU::completeDrain()
975{
976 DPRINTF(Config, "Done draining\n");
977 changeState(SimObject::Drained);
978 drainEvent->process();
979}
980
981void
982TimingSimpleCPU::DcachePort::setPeer(Port *port)
983{
984 Port::setPeer(port);
985
986#if FULL_SYSTEM
987 // Update the ThreadContext's memory ports (Functional/Virtual
988 // Ports)
989 cpu->tcBase()->connectMemPorts(cpu->tcBase());
990#endif
991}
992
993bool
994TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
995{
996 if (pkt->isResponse() && !pkt->wasNacked()) {
997 // delay processing of returned data until next CPU clock edge
998 Tick next_tick = cpu->nextCycle(curTick());
999
1000 if (next_tick == curTick()) {
1001 cpu->completeDataAccess(pkt);
1002 } else {
1003 if (!tickEvent.scheduled()) {
1004 tickEvent.schedule(pkt, next_tick);
1005 } else {
1006 // In the case of a split transaction and a cache that is
1007 // faster than a CPU we could get two responses before
1008 // next_tick expires
1009 if (!retryEvent.scheduled())
1010 schedule(retryEvent, next_tick);
1011 return false;
1012 }
1013 }
1014
1015 return true;
1016 }
1017 else if (pkt->wasNacked()) {
1018 assert(cpu->_status == DcacheWaitResponse);
1019 pkt->reinitNacked();
1020 if (!sendTiming(pkt)) {
1021 cpu->_status = DcacheRetry;
1022 cpu->dcache_pkt = pkt;
1023 }
1024 }
1025 //Snooping a Coherence Request, do nothing
1026 return true;
1027}
1028
1029void
1030TimingSimpleCPU::DcachePort::DTickEvent::process()
1031{
1032 cpu->completeDataAccess(pkt);
1033}
1034
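// The cache asked us to retry a previously rejected packet. For split accesses
// this resends the failed fragment and, if that succeeds, also tries to send
// the other pending fragment; otherwise the single pending packet is simply
// resent.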
1035void
1036TimingSimpleCPU::DcachePort::recvRetry()
1037{
1038 // we shouldn't get a retry unless we have a packet that we're
1039 // waiting to transmit
1040 assert(cpu->dcache_pkt != NULL);
1041 assert(cpu->_status == DcacheRetry);
1042 PacketPtr tmp = cpu->dcache_pkt;
1043 if (tmp->senderState) {
1044 // This is a packet from a split access.
1045 SplitFragmentSenderState * send_state =
1046 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1047 assert(send_state);
1048 PacketPtr big_pkt = send_state->bigPkt;
1049
1050 SplitMainSenderState * main_send_state =
1051 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1052 assert(main_send_state);
1053
1054 if (sendTiming(tmp)) {
1055 // If we were able to send without retrying, record that fact
1056 // and try sending the other fragment.
1057 send_state->clearFromParent();
1058 int other_index = main_send_state->getPendingFragment();
1059 if (other_index > 0) {
1060 tmp = main_send_state->fragments[other_index];
1061 cpu->dcache_pkt = tmp;
1062 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1063 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1064 main_send_state->fragments[other_index] = NULL;
1065 }
1066 } else {
1067 cpu->_status = DcacheWaitResponse;
1068 // memory system takes ownership of packet
1069 cpu->dcache_pkt = NULL;
1070 }
1071 }
1072 } else if (sendTiming(tmp)) {
1073 cpu->_status = DcacheWaitResponse;
1074 // memory system takes ownership of packet
1075 cpu->dcache_pkt = NULL;
1076 }
1077}
1078
1079TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
1080 Tick t)
1081 : pkt(_pkt), cpu(_cpu)
1082{
1083 cpu->schedule(this, t);
1084}
1085
1086void
1087TimingSimpleCPU::IprEvent::process()
1088{
1089 cpu->completeDataAccess(pkt);
1090}
1091
1092const char *
1093TimingSimpleCPU::IprEvent::description() const
1094{
1095 return "Timing Simple CPU Delay IPR event";
1096}
1097
1098
1099void
1100TimingSimpleCPU::printAddr(Addr a)
1101{
1102 dcachePort.printAddr(a);
1103}
1104
1105
1106////////////////////////////////////////////////////////////////////////
1107//
1108// TimingSimpleCPU Simulation Object
1109//
1110TimingSimpleCPU *
1111TimingSimpleCPUParams::create()
1112{
1113 numThreads = 1;
1114#if !FULL_SYSTEM
1115 if (workload.size() != 1)
1116 panic("only one workload allowed");
1117#endif
1118 return new TimingSimpleCPU(this);
1119}