timing.cc (5726:17157c5f7e15) timing.cc (5728:9574f561dfa2)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this)
108{
109 _status = Idle;
110
111 icachePort.snoopRangeSent = false;
112 dcachePort.snoopRangeSent = false;
113
114 ifetch_pkt = dcache_pkt = NULL;
115 drainEvent = NULL;
116 previousTick = 0;
117 changeState(SimObject::Running);
118}
119
120
121TimingSimpleCPU::~TimingSimpleCPU()
122{
123}
124
125void
126TimingSimpleCPU::serialize(ostream &os)
127{
128 SimObject::State so_state = SimObject::getState();
129 SERIALIZE_ENUM(so_state);
130 BaseSimpleCPU::serialize(os);
131}
132
133void
134TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
135{
136 SimObject::State so_state;
137 UNSERIALIZE_ENUM(so_state);
138 BaseSimpleCPU::unserialize(cp, section);
139}
140
141unsigned int
142TimingSimpleCPU::drain(Event *drain_event)
143{
144 // TimingSimpleCPU is ready to drain if it's not waiting for
145 // an access to complete.
146 if (_status == Idle || _status == Running || _status == SwitchedOut) {
147 changeState(SimObject::Drained);
148 return 0;
149 } else {
150 changeState(SimObject::Draining);
151 drainEvent = drain_event;
152 return 1;
153 }
154}
155
156void
157TimingSimpleCPU::resume()
158{
159 DPRINTF(SimpleCPU, "Resume\n");
160 if (_status != SwitchedOut && _status != Idle) {
161 assert(system->getMemoryMode() == Enums::timing);
162
163 if (fetchEvent.scheduled())
164 deschedule(fetchEvent);
165
166 schedule(fetchEvent, nextCycle());
167 }
168
169 changeState(SimObject::Running);
170}
171
172void
173TimingSimpleCPU::switchOut()
174{
175 assert(_status == Running || _status == Idle);
176 _status = SwitchedOut;
177 numCycles += tickToCycles(curTick - previousTick);
178
179 // If we've been scheduled to resume but are then told to switch out,
180 // we'll need to cancel it.
181 if (fetchEvent.scheduled())
182 deschedule(fetchEvent);
183}
184
185
186void
187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
188{
189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
190
191 // if any of this CPU's ThreadContexts are active, mark the CPU as
192 // running and schedule its tick event.
193 for (int i = 0; i < threadContexts.size(); ++i) {
194 ThreadContext *tc = threadContexts[i];
195 if (tc->status() == ThreadContext::Active && _status != Running) {
196 _status = Running;
197 break;
198 }
199 }
200
201 if (_status != Running) {
202 _status = Idle;
203 }
204 assert(threadContexts.size() == 1);
205 previousTick = curTick;
206}
207
208
209void
210TimingSimpleCPU::activateContext(int thread_num, int delay)
211{
212 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
213
214 assert(thread_num == 0);
215 assert(thread);
216
217 assert(_status == Idle);
218
219 notIdleFraction++;
220 _status = Running;
221
222 // kick things off by initiating the fetch of the next instruction
223 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
224}
225
226
227void
228TimingSimpleCPU::suspendContext(int thread_num)
229{
230 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
231
232 assert(thread_num == 0);
233 assert(thread);
234
235 assert(_status == Running);
236
237 // just change status to Idle... if status != Running,
238 // completeInst() will not initiate fetch of next instruction.
239
240 notIdleFraction--;
241 _status = Idle;
242}
243
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
80 //No internal storage to update, jusst return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this)
108{
109 _status = Idle;
110
111 icachePort.snoopRangeSent = false;
112 dcachePort.snoopRangeSent = false;
113
114 ifetch_pkt = dcache_pkt = NULL;
115 drainEvent = NULL;
116 previousTick = 0;
117 changeState(SimObject::Running);
118}
119
120
121TimingSimpleCPU::~TimingSimpleCPU()
122{
123}
124
125void
126TimingSimpleCPU::serialize(ostream &os)
127{
128 SimObject::State so_state = SimObject::getState();
129 SERIALIZE_ENUM(so_state);
130 BaseSimpleCPU::serialize(os);
131}
132
133void
134TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
135{
136 SimObject::State so_state;
137 UNSERIALIZE_ENUM(so_state);
138 BaseSimpleCPU::unserialize(cp, section);
139}
140
141unsigned int
142TimingSimpleCPU::drain(Event *drain_event)
143{
144 // TimingSimpleCPU is ready to drain if it's not waiting for
145 // an access to complete.
146 if (_status == Idle || _status == Running || _status == SwitchedOut) {
147 changeState(SimObject::Drained);
148 return 0;
149 } else {
150 changeState(SimObject::Draining);
151 drainEvent = drain_event;
152 return 1;
153 }
154}
155
156void
157TimingSimpleCPU::resume()
158{
159 DPRINTF(SimpleCPU, "Resume\n");
160 if (_status != SwitchedOut && _status != Idle) {
161 assert(system->getMemoryMode() == Enums::timing);
162
163 if (fetchEvent.scheduled())
164 deschedule(fetchEvent);
165
166 schedule(fetchEvent, nextCycle());
167 }
168
169 changeState(SimObject::Running);
170}
171
172void
173TimingSimpleCPU::switchOut()
174{
175 assert(_status == Running || _status == Idle);
176 _status = SwitchedOut;
177 numCycles += tickToCycles(curTick - previousTick);
178
179 // If we've been scheduled to resume but are then told to switch out,
180 // we'll need to cancel it.
181 if (fetchEvent.scheduled())
182 deschedule(fetchEvent);
183}
184
185
186void
187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
188{
189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
190
191 // if any of this CPU's ThreadContexts are active, mark the CPU as
192 // running and schedule its tick event.
193 for (int i = 0; i < threadContexts.size(); ++i) {
194 ThreadContext *tc = threadContexts[i];
195 if (tc->status() == ThreadContext::Active && _status != Running) {
196 _status = Running;
197 break;
198 }
199 }
200
201 if (_status != Running) {
202 _status = Idle;
203 }
204 assert(threadContexts.size() == 1);
205 previousTick = curTick;
206}
207
208
209void
210TimingSimpleCPU::activateContext(int thread_num, int delay)
211{
212 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
213
214 assert(thread_num == 0);
215 assert(thread);
216
217 assert(_status == Idle);
218
219 notIdleFraction++;
220 _status = Running;
221
222 // kick things off by initiating the fetch of the next instruction
223 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
224}
225
226
227void
228TimingSimpleCPU::suspendContext(int thread_num)
229{
230 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
231
232 assert(thread_num == 0);
233 assert(thread);
234
235 assert(_status == Running);
236
237 // just change status to Idle... if status != Running,
238 // completeInst() will not initiate fetch of next instruction.
239
240 notIdleFraction--;
241 _status = Idle;
242}
243
244bool
245TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
246{
247 RequestPtr req = pkt->req;
248 if (req->isMmapedIpr()) {
249 Tick delay;
250 delay = TheISA::handleIprRead(thread->getTC(), pkt);
251 new IprEvent(pkt, this, nextCycle(curTick + delay));
252 _status = DcacheWaitResponse;
253 dcache_pkt = NULL;
254 } else if (!dcachePort.sendTiming(pkt)) {
255 _status = DcacheRetry;
256 dcache_pkt = pkt;
257 } else {
258 _status = DcacheWaitResponse;
259 // memory system takes ownership of packet
260 dcache_pkt = NULL;
261 }
262 return dcache_pkt == NULL;
263}
244
245template <class T>
246Fault
247TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
248{
264
265template <class T>
266Fault
267TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
268{
249 Request *req =
250 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
251 _cpuId, /* thread ID */ 0);
269 Fault fault;
270 const int asid = 0;
271 const int thread_id = 0;
272 const Addr pc = thread->readPC();
252
273
253 if (traceData) {
254 traceData->setAddr(req->getVaddr());
255 }
274 PacketPtr pkt;
275 RequestPtr req;
256
276
257 // translate to physical address
258 Fault fault = thread->translateDataReadReq(req);
277 int block_size = dcachePort.peerBlockSize();
278 int data_size = sizeof(T);
259
279
260 // Now do the access.
261 if (fault == NoFault) {
262 PacketPtr pkt =
263 new Packet(req,
264 (req->isLocked() ?
265 MemCmd::LoadLockedReq : MemCmd::ReadReq),
266 Packet::Broadcast);
267 pkt->dataDynamic<T>(new T);
280 Addr second_addr = roundDown(addr + data_size - 1, block_size);
268
281
269 if (req->isMmapedIpr()) {
270 Tick delay;
271 delay = TheISA::handleIprRead(thread->getTC(), pkt);
272 new IprEvent(pkt, this, nextCycle(curTick + delay));
273 _status = DcacheWaitResponse;
274 dcache_pkt = NULL;
275 } else if (!dcachePort.sendTiming(pkt)) {
276 _status = DcacheRetry;
277 dcache_pkt = pkt;
278 } else {
279 _status = DcacheWaitResponse;
280 // memory system takes ownership of packet
281 dcache_pkt = NULL;
282 if (second_addr > addr) {
283 Addr first_size = second_addr - addr;
284 Addr second_size = data_size - first_size;
285 // Make sure we'll only need two accesses.
286 assert(roundDown(second_addr + second_size - 1, block_size) ==
287 second_addr);
288
289 /*
290 * Do the translations. If something isn't going to work, find out
291 * before we waste time setting up anything else.
292 */
293 req = new Request(asid, addr, first_size,
294 flags, pc, _cpuId, thread_id);
295 fault = thread->translateDataReadReq(req);
296 if (fault != NoFault) {
297 delete req;
298 return fault;
282 }
299 }
300 Request *second_req =
301 new Request(asid, second_addr, second_size,
302 flags, pc, _cpuId, thread_id);
303 fault = thread->translateDataReadReq(second_req);
304 if (fault != NoFault) {
305 delete req;
306 delete second_req;
307 return fault;
308 }
283
309
284 // This will need a new way to tell if it has a dcache attached.
285 if (req->isUncacheable())
286 recordEvent("Uncached Read");
310 T * data_ptr = new T;
311
312 /*
313 * This is the big packet that will hold the data we've gotten so far,
314 * if any, and also act as the response we actually give to the
315 * instruction.
316 */
317 Request *orig_req =
318 new Request(asid, addr, data_size, flags, pc, _cpuId, thread_id);
319 orig_req->setPhys(req->getPaddr(), data_size, flags);
320 PacketPtr big_pkt =
321 new Packet(orig_req, MemCmd::ReadResp, Packet::Broadcast);
322 big_pkt->dataDynamic<T>(data_ptr);
323 SplitMainSenderState * main_send_state = new SplitMainSenderState;
324 big_pkt->senderState = main_send_state;
325 main_send_state->outstanding = 2;
326
327 // This is the packet we'll process now.
328 pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
329 pkt->dataStatic<uint8_t>((uint8_t *)data_ptr);
330 pkt->senderState = new SplitFragmentSenderState(big_pkt, 0);
331
332 // This is the second half of the access we'll deal with later.
333 PacketPtr second_pkt =
334 new Packet(second_req, MemCmd::ReadReq, Packet::Broadcast);
335 second_pkt->dataStatic<uint8_t>((uint8_t *)data_ptr + first_size);
336 second_pkt->senderState = new SplitFragmentSenderState(big_pkt, 1);
337 if (!handleReadPacket(pkt)) {
338 main_send_state->fragments[1] = second_pkt;
339 } else {
340 handleReadPacket(second_pkt);
341 }
287 } else {
342 } else {
288 delete req;
343 req = new Request(asid, addr, data_size,
344 flags, pc, _cpuId, thread_id);
345
346 // translate to physical address
347 Fault fault = thread->translateDataReadReq(req);
348
349 if (fault != NoFault) {
350 delete req;
351 return fault;
352 }
353
354 pkt = new Packet(req,
355 (req->isLocked() ?
356 MemCmd::LoadLockedReq : MemCmd::ReadReq),
357 Packet::Broadcast);
358 pkt->dataDynamic<T>(new T);
359
360 handleReadPacket(pkt);
289 }
290
291 if (traceData) {
292 traceData->setData(data);
361 }
362
363 if (traceData) {
364 traceData->setData(data);
365 traceData->setAddr(addr);
293 }
366 }
294 return fault;
367
368 // This will need a new way to tell if it has a dcache attached.
369 if (req->isUncacheable())
370 recordEvent("Uncached Read");
371
372 return NoFault;
295}
296
297Fault
298TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
299 int size, unsigned flags)
300{
301 Request *req =
302 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
303
304 if (traceData) {
305 traceData->setAddr(vaddr);
306 }
307
308 Fault fault = thread->translateDataWriteReq(req);
309
310 if (fault == NoFault)
311 paddr = req->getPaddr();
312
313 delete req;
314 return fault;
315}
316
317#ifndef DOXYGEN_SHOULD_SKIP_THIS
318
319template
320Fault
321TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
322
323template
324Fault
325TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
326
327template
328Fault
329TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
330
331template
332Fault
333TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
334
335template
336Fault
337TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
338
339template
340Fault
341TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
342
343#endif //DOXYGEN_SHOULD_SKIP_THIS
344
345template<>
346Fault
347TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
348{
349 return read(addr, *(uint64_t*)&data, flags);
350}
351
352template<>
353Fault
354TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
355{
356 return read(addr, *(uint32_t*)&data, flags);
357}
358
359
360template<>
361Fault
362TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
363{
364 return read(addr, (uint32_t&)data, flags);
365}
366
373}
374
375Fault
376TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
377 int size, unsigned flags)
378{
379 Request *req =
380 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
381
382 if (traceData) {
383 traceData->setAddr(vaddr);
384 }
385
386 Fault fault = thread->translateDataWriteReq(req);
387
388 if (fault == NoFault)
389 paddr = req->getPaddr();
390
391 delete req;
392 return fault;
393}
394
395#ifndef DOXYGEN_SHOULD_SKIP_THIS
396
397template
398Fault
399TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
400
401template
402Fault
403TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
404
405template
406Fault
407TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
408
409template
410Fault
411TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
412
413template
414Fault
415TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
416
417template
418Fault
419TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
420
421#endif //DOXYGEN_SHOULD_SKIP_THIS
422
423template<>
424Fault
425TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
426{
427 return read(addr, *(uint64_t*)&data, flags);
428}
429
430template<>
431Fault
432TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
433{
434 return read(addr, *(uint32_t*)&data, flags);
435}
436
437
438template<>
439Fault
440TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
441{
442 return read(addr, (uint32_t&)data, flags);
443}
444
445bool
446TimingSimpleCPU::handleWritePacket()
447{
448 RequestPtr req = dcache_pkt->req;
449 if (req->isMmapedIpr()) {
450 Tick delay;
451 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
452 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
453 _status = DcacheWaitResponse;
454 dcache_pkt = NULL;
455 } else if (!dcachePort.sendTiming(dcache_pkt)) {
456 _status = DcacheRetry;
457 } else {
458 _status = DcacheWaitResponse;
459 // memory system takes ownership of packet
460 dcache_pkt = NULL;
461 }
462 return dcache_pkt == NULL;
463}
367
368template <class T>
369Fault
370TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
371{
464
465template <class T>
466Fault
467TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
468{
372 Request *req =
373 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
374 _cpuId, /* thread ID */ 0);
469 const int asid = 0;
470 const int thread_id = 0;
471 bool do_access = true; // flag to suppress cache access
472 const Addr pc = thread->readPC();
375
473
376 if (traceData) {
377 traceData->setAddr(req->getVaddr());
378 }
474 RequestPtr req;
379
475
380 // translate to physical address
381 Fault fault = thread->translateDataWriteReq(req);
476 int block_size = dcachePort.peerBlockSize();
477 int data_size = sizeof(T);
382
478
383 // Now do the access.
384 if (fault == NoFault) {
479 Addr second_addr = roundDown(addr + data_size - 1, block_size);
480
481 if (second_addr > addr) {
482 Fault fault;
483 Addr first_size = second_addr - addr;
484 Addr second_size = data_size - first_size;
485 // Make sure we'll only need two accesses.
486 assert(roundDown(second_addr + second_size - 1, block_size) ==
487 second_addr);
488
489 req = new Request(asid, addr, first_size,
490 flags, pc, _cpuId, thread_id);
491 fault = thread->translateDataWriteReq(req);
492 if (fault != NoFault) {
493 delete req;
494 return fault;
495 }
496 RequestPtr second_req = new Request(asid, second_addr, second_size,
497 flags, pc, _cpuId, thread_id);
498 fault = thread->translateDataWriteReq(second_req);
499 if (fault != NoFault) {
500 delete req;
501 delete second_req;
502 return fault;
503 }
504
505 if (req->isLocked() || req->isSwap() ||
506 second_req->isLocked() || second_req->isSwap()) {
507 panic("LL/SCs and swaps can't be split.");
508 }
509
510 T * data_ptr = new T;
511
512 /*
513 * This is the big packet that will hold the data we've gotten so far,
514 * if any, and also act as the response we actually give to the
515 * instruction.
516 */
517 RequestPtr orig_req =
518 new Request(asid, addr, data_size, flags, pc, _cpuId, thread_id);
519 orig_req->setPhys(req->getPaddr(), data_size, flags);
520 PacketPtr big_pkt =
521 new Packet(orig_req, MemCmd::WriteResp, Packet::Broadcast);
522 big_pkt->dataDynamic<T>(data_ptr);
523 big_pkt->set(data);
524 SplitMainSenderState * main_send_state = new SplitMainSenderState;
525 big_pkt->senderState = main_send_state;
526 main_send_state->outstanding = 2;
527
528 assert(dcache_pkt == NULL);
529 // This is the packet we'll process now.
530 dcache_pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
531 dcache_pkt->dataStatic<uint8_t>((uint8_t *)data_ptr);
532 dcache_pkt->senderState = new SplitFragmentSenderState(big_pkt, 0);
533
534 // This is the second half of the access we'll deal with later.
535 PacketPtr second_pkt =
536 new Packet(second_req, MemCmd::WriteReq, Packet::Broadcast);
537 second_pkt->dataStatic<uint8_t>((uint8_t *)data_ptr + first_size);
538 second_pkt->senderState = new SplitFragmentSenderState(big_pkt, 1);
539 if (!handleWritePacket()) {
540 main_send_state->fragments[1] = second_pkt;
541 } else {
542 dcache_pkt = second_pkt;
543 handleWritePacket();
544 }
545 } else {
546 req = new Request(asid, addr, data_size, flags, pc, _cpuId, thread_id);
547
548 // translate to physical address
549 Fault fault = thread->translateDataWriteReq(req);
550 if (fault != NoFault) {
551 delete req;
552 return fault;
553 }
554
385 MemCmd cmd = MemCmd::WriteReq; // default
555 MemCmd cmd = MemCmd::WriteReq; // default
386 bool do_access = true; // flag to suppress cache access
387
388 if (req->isLocked()) {
389 cmd = MemCmd::StoreCondReq;
390 do_access = TheISA::handleLockedWrite(thread, req);
391 } else if (req->isSwap()) {
392 cmd = MemCmd::SwapReq;
393 if (req->isCondSwap()) {
394 assert(res);
395 req->setExtraData(*res);
396 }
397 }
398
399 // Note: need to allocate dcache_pkt even if do_access is
400 // false, as it's used unconditionally to call completeAcc().
401 assert(dcache_pkt == NULL);
402 dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
403 dcache_pkt->allocate();
556
557 if (req->isLocked()) {
558 cmd = MemCmd::StoreCondReq;
559 do_access = TheISA::handleLockedWrite(thread, req);
560 } else if (req->isSwap()) {
561 cmd = MemCmd::SwapReq;
562 if (req->isCondSwap()) {
563 assert(res);
564 req->setExtraData(*res);
565 }
566 }
567
568 // Note: need to allocate dcache_pkt even if do_access is
569 // false, as it's used unconditionally to call completeAcc().
570 assert(dcache_pkt == NULL);
571 dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
572 dcache_pkt->allocate();
404 dcache_pkt->set(data);
573 if (req->isMmapedIpr())
574 dcache_pkt->set(htog(data));
575 else
576 dcache_pkt->set(data);
405
577
406 if (do_access) {
407 if (req->isMmapedIpr()) {
408 Tick delay;
409 dcache_pkt->set(htog(data));
410 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
411 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
412 _status = DcacheWaitResponse;
413 dcache_pkt = NULL;
414 } else if (!dcachePort.sendTiming(dcache_pkt)) {
415 _status = DcacheRetry;
416 } else {
417 _status = DcacheWaitResponse;
418 // memory system takes ownership of packet
419 dcache_pkt = NULL;
420 }
421 }
422 // This will need a new way to tell if it's hooked up to a cache or not.
423 if (req->isUncacheable())
424 recordEvent("Uncached Write");
425 } else {
426 delete req;
578 if (do_access)
579 handleWritePacket();
427 }
428
429 if (traceData) {
580 }
581
582 if (traceData) {
583 traceData->setAddr(req->getVaddr());
430 traceData->setData(data);
431 }
432
584 traceData->setData(data);
585 }
586
587 // This will need a new way to tell if it's hooked up to a cache or not.
588 if (req->isUncacheable())
589 recordEvent("Uncached Write");
590
433 // If the write needs to have a fault on the access, consider calling
434 // changeStatus() and changing it to "bad addr write" or something.
591 // If the write needs to have a fault on the access, consider calling
592 // changeStatus() and changing it to "bad addr write" or something.
435 return fault;
593 return NoFault;
436}
437
438Fault
439TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
440 int size, unsigned flags)
441{
442 Request *req =
443 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
444
445 if (traceData) {
446 traceData->setAddr(vaddr);
447 }
448
449 Fault fault = thread->translateDataWriteReq(req);
450
451 if (fault == NoFault)
452 paddr = req->getPaddr();
453
454 delete req;
455 return fault;
456}
457
458
459#ifndef DOXYGEN_SHOULD_SKIP_THIS
460template
461Fault
462TimingSimpleCPU::write(Twin32_t data, Addr addr,
463 unsigned flags, uint64_t *res);
464
465template
466Fault
467TimingSimpleCPU::write(Twin64_t data, Addr addr,
468 unsigned flags, uint64_t *res);
469
470template
471Fault
472TimingSimpleCPU::write(uint64_t data, Addr addr,
473 unsigned flags, uint64_t *res);
474
475template
476Fault
477TimingSimpleCPU::write(uint32_t data, Addr addr,
478 unsigned flags, uint64_t *res);
479
480template
481Fault
482TimingSimpleCPU::write(uint16_t data, Addr addr,
483 unsigned flags, uint64_t *res);
484
485template
486Fault
487TimingSimpleCPU::write(uint8_t data, Addr addr,
488 unsigned flags, uint64_t *res);
489
490#endif //DOXYGEN_SHOULD_SKIP_THIS
491
492template<>
493Fault
494TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
495{
496 return write(*(uint64_t*)&data, addr, flags, res);
497}
498
499template<>
500Fault
501TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
502{
503 return write(*(uint32_t*)&data, addr, flags, res);
504}
505
506
507template<>
508Fault
509TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
510{
511 return write((uint32_t)data, addr, flags, res);
512}
513
514
515void
516TimingSimpleCPU::fetch()
517{
518 DPRINTF(SimpleCPU, "Fetch\n");
519
520 if (!curStaticInst || !curStaticInst->isDelayedCommit())
521 checkForInterrupts();
522
523 checkPcEventQueue();
524
525 bool fromRom = isRomMicroPC(thread->readMicroPC());
526
527 if (!fromRom) {
528 Request *ifetch_req = new Request();
529 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
530 Fault fault = setupFetchRequest(ifetch_req);
531
532 ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
533 ifetch_pkt->dataStatic(&inst);
534
535 if (fault == NoFault) {
536 if (!icachePort.sendTiming(ifetch_pkt)) {
537 // Need to wait for retry
538 _status = IcacheRetry;
539 } else {
540 // Need to wait for cache to respond
541 _status = IcacheWaitResponse;
542 // ownership of packet transferred to memory system
543 ifetch_pkt = NULL;
544 }
545 } else {
546 delete ifetch_req;
547 delete ifetch_pkt;
548 // fetch fault: advance directly to next instruction (fault handler)
549 advanceInst(fault);
550 }
551 } else {
552 _status = IcacheWaitResponse;
553 completeIfetch(NULL);
554 }
555
556 numCycles += tickToCycles(curTick - previousTick);
557 previousTick = curTick;
558}
559
560
561void
562TimingSimpleCPU::advanceInst(Fault fault)
563{
564 if (fault != NoFault || !stayAtPC)
565 advancePC(fault);
566
567 if (_status == Running) {
568 // kick off fetch of next instruction... callback from icache
569 // response will cause that instruction to be executed,
570 // keeping the CPU running.
571 fetch();
572 }
573}
574
575
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction.  pkt is NULL when the instruction came from the
    // microcode ROM rather than the icache (see fetch()).

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    // Charge all cycles that elapsed since the last accounting point.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        // CPU is draining: discard the fetched instruction and finish
        // the drain instead of executing anything.
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
        curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            // Access completed (or was squashed) without going to the
            // dcache; finish the instruction right here.
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL)
                {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // preExecute() left no decoded instruction; just advance.
        advanceInst(NoFault);
    }

    // The fetch packet (if any) has served its purpose; reclaim it.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
671
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: deliver the packet to the CPU now that
    // its clock edge has arrived (scheduled from recvTiming()).
    cpu->completeIfetch(pkt);
}
677
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    // Normal fetch response: consume it, aligned to the CPU clock.
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        // The fetch was nacked downstream: reset the packet and resend
        // immediately; if the port is busy, park it until recvRetry().
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
703
704void
705TimingSimpleCPU::IcachePort::recvRetry()
706{
707 // we shouldn't get a retry unless we have a packet that we're
708 // waiting to transmit
709 assert(cpu->ifetch_pkt != NULL);
710 assert(cpu->_status == IcacheRetry);
711 PacketPtr tmp = cpu->ifetch_pkt;
712 if (sendTiming(tmp)) {
713 cpu->_status = IcacheWaitResponse;
714 cpu->ifetch_pkt = NULL;
715 }
716}
717
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    // NOTE(review): this definition appears truncated by a bad merge of
    // two revisions -- the response is never consumed, the packet is
    // never freed, and the instruction is never completed.  Compare the
    // fuller completeDataAccess() later in this file and reconcile
    // against upstream before relying on this code.
}
595
596Fault
597TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
598 int size, unsigned flags)
599{
600 Request *req =
601 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
602
603 if (traceData) {
604 traceData->setAddr(vaddr);
605 }
606
607 Fault fault = thread->translateDataWriteReq(req);
608
609 if (fault == NoFault)
610 paddr = req->getPaddr();
611
612 delete req;
613 return fault;
614}
615
616
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations of the templated write() for every access
// width the ISAs use (hidden from the doxygen documentation).
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS
649
template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    // Store the raw IEEE-754 bit pattern via the uint64_t overload.
    // NOTE(review): the pointer-cast type pun violates strict aliasing;
    // confirm the build uses -fno-strict-aliasing, else prefer memcpy.
    return write(*(uint64_t*)&data, addr, flags, res);
}
656
template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    // Store the raw IEEE-754 bit pattern via the uint32_t overload.
    // NOTE(review): the pointer-cast type pun violates strict aliasing;
    // confirm the build uses -fno-strict-aliasing, else prefer memcpy.
    return write(*(uint32_t*)&data, addr, flags, res);
}
663
664
665template<>
666Fault
667TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
668{
669 return write((uint32_t)data, addr, flags, res);
670}
671
672
673void
674TimingSimpleCPU::fetch()
675{
676 DPRINTF(SimpleCPU, "Fetch\n");
677
678 if (!curStaticInst || !curStaticInst->isDelayedCommit())
679 checkForInterrupts();
680
681 checkPcEventQueue();
682
683 bool fromRom = isRomMicroPC(thread->readMicroPC());
684
685 if (!fromRom) {
686 Request *ifetch_req = new Request();
687 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
688 Fault fault = setupFetchRequest(ifetch_req);
689
690 ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
691 ifetch_pkt->dataStatic(&inst);
692
693 if (fault == NoFault) {
694 if (!icachePort.sendTiming(ifetch_pkt)) {
695 // Need to wait for retry
696 _status = IcacheRetry;
697 } else {
698 // Need to wait for cache to respond
699 _status = IcacheWaitResponse;
700 // ownership of packet transferred to memory system
701 ifetch_pkt = NULL;
702 }
703 } else {
704 delete ifetch_req;
705 delete ifetch_pkt;
706 // fetch fault: advance directly to next instruction (fault handler)
707 advanceInst(fault);
708 }
709 } else {
710 _status = IcacheWaitResponse;
711 completeIfetch(NULL);
712 }
713
714 numCycles += tickToCycles(curTick - previousTick);
715 previousTick = curTick;
716}
717
718
719void
720TimingSimpleCPU::advanceInst(Fault fault)
721{
722 if (fault != NoFault || !stayAtPC)
723 advancePC(fault);
724
725 if (_status == Running) {
726 // kick off fetch of next instruction... callback from icache
727 // response will cause that instruction to be executed,
728 // keeping the CPU running.
729 fetch();
730 }
731}
732
733
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction.  pkt is NULL when the instruction came from the
    // microcode ROM rather than the icache (see fetch()).

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    // Charge all cycles that elapsed since the last accounting point.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        // CPU is draining: discard the fetched instruction and finish
        // the drain instead of executing anything.
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
        curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            // Access completed (or was squashed) without going to the
            // dcache; finish the instruction right here.
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL)
                {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // preExecute() left no decoded instruction; just advance.
        advanceInst(NoFault);
    }

    // The fetch packet (if any) has served its purpose; reclaim it.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
829
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deferred icache response: deliver the packet to the CPU now that
    // its clock edge has arrived (scheduled from recvTiming()).
    cpu->completeIfetch(pkt);
}
835
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    // Normal fetch response: consume it, aligned to the CPU clock.
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        // The fetch was nacked downstream: reset the packet and resend
        // immediately; if the port is busy, park it until recvRetry().
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}
861
862void
863TimingSimpleCPU::IcachePort::recvRetry()
864{
865 // we shouldn't get a retry unless we have a packet that we're
866 // waiting to transmit
867 assert(cpu->ifetch_pkt != NULL);
868 assert(cpu->_status == IcacheRetry);
869 PacketPtr tmp = cpu->ifetch_pkt;
870 if (sendTiming(tmp)) {
871 cpu->_status = IcacheWaitResponse;
872 cpu->ifetch_pkt = NULL;
873 }
874}
875
876void
877TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
878{
879 // received a response from the dcache: complete the load or store
880 // instruction
881 assert(!pkt->isError());
724 assert(_status == DcacheWaitResponse);
725 _status = Running;
726
727 numCycles += tickToCycles(curTick - previousTick);
728 previousTick = curTick;
729
882
883 numCycles += tickToCycles(curTick - previousTick);
884 previousTick = curTick;
885
886 if (pkt->senderState) {
887 SplitFragmentSenderState * send_state =
888 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
889 assert(send_state);
890 delete pkt->req;
891 delete pkt;
892 PacketPtr big_pkt = send_state->bigPkt;
893 delete send_state;
894
895 SplitMainSenderState * main_send_state =
896 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
897 assert(main_send_state);
898 // Record the fact that this packet is no longer outstanding.
899 assert(main_send_state->outstanding != 0);
900 main_send_state->outstanding--;
901
902 if (main_send_state->outstanding) {
903 return;
904 } else {
905 delete main_send_state;
906 big_pkt->senderState = NULL;
907 pkt = big_pkt;
908 }
909 }
910
911 assert(_status == DcacheWaitResponse);
912 _status = Running;
913
730 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
731
732 // keep an instruction count
733 if (fault == NoFault)
734 countInst();
735 else if (traceData) {
736 // If there was a fault, we shouldn't trace this instruction.
737 delete traceData;
738 traceData = NULL;
739 }
740
741 // the locked flag may be cleared on the response packet, so check
742 // pkt->req and not pkt to see if it was a load-locked
743 if (pkt->isRead() && pkt->req->isLocked()) {
744 TheISA::handleLockedRead(thread, pkt->req);
745 }
746
747 delete pkt->req;
748 delete pkt;
749
750 postExecute();
751
752 if (getState() == SimObject::Draining) {
753 advancePC(fault);
754 completeDrain();
755
756 return;
757 }
758
759 advanceInst(fault);
760}
761
762
void
TimingSimpleCPU::completeDrain()
{
    // Last outstanding access has finished: record the drained state
    // and notify whoever requested the drain.
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}
770
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    // Hook the base connection, then refresh dependent ports.
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}
782
783bool
784TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
785{
786 if (pkt->isResponse() && !pkt->wasNacked()) {
787 // delay processing of returned data until next CPU clock edge
788 Tick next_tick = cpu->nextCycle(curTick);
789
914 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
915
916 // keep an instruction count
917 if (fault == NoFault)
918 countInst();
919 else if (traceData) {
920 // If there was a fault, we shouldn't trace this instruction.
921 delete traceData;
922 traceData = NULL;
923 }
924
925 // the locked flag may be cleared on the response packet, so check
926 // pkt->req and not pkt to see if it was a load-locked
927 if (pkt->isRead() && pkt->req->isLocked()) {
928 TheISA::handleLockedRead(thread, pkt->req);
929 }
930
931 delete pkt->req;
932 delete pkt;
933
934 postExecute();
935
936 if (getState() == SimObject::Draining) {
937 advancePC(fault);
938 completeDrain();
939
940 return;
941 }
942
943 advanceInst(fault);
944}
945
946
void
TimingSimpleCPU::completeDrain()
{
    // Last outstanding access has finished: record the drained state
    // and notify whoever requested the drain.
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}
954
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    // Hook the base connection, then refresh dependent ports.
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}
966
967bool
968TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
969{
970 if (pkt->isResponse() && !pkt->wasNacked()) {
971 // delay processing of returned data until next CPU clock edge
972 Tick next_tick = cpu->nextCycle(curTick);
973
790 if (next_tick == curTick)
974 if (next_tick == curTick) {
791 cpu->completeDataAccess(pkt);
975 cpu->completeDataAccess(pkt);
792 else
976 } else {
793 tickEvent.schedule(pkt, next_tick);
977 tickEvent.schedule(pkt, next_tick);
978 }
794
795 return true;
796 }
797 else if (pkt->wasNacked()) {
798 assert(cpu->_status == DcacheWaitResponse);
799 pkt->reinitNacked();
800 if (!sendTiming(pkt)) {
801 cpu->_status = DcacheRetry;
802 cpu->dcache_pkt = pkt;
803 }
804 }
805 //Snooping a Coherence Request, do nothing
806 return true;
807}
808
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: deliver the packet to the CPU now that
    // its clock edge has arrived (scheduled from recvTiming()).
    cpu->completeDataAccess(pkt);
}
814
815void
816TimingSimpleCPU::DcachePort::recvRetry()
817{
818 // we shouldn't get a retry unless we have a packet that we're
819 // waiting to transmit
820 assert(cpu->dcache_pkt != NULL);
821 assert(cpu->_status == DcacheRetry);
822 PacketPtr tmp = cpu->dcache_pkt;
979
980 return true;
981 }
982 else if (pkt->wasNacked()) {
983 assert(cpu->_status == DcacheWaitResponse);
984 pkt->reinitNacked();
985 if (!sendTiming(pkt)) {
986 cpu->_status = DcacheRetry;
987 cpu->dcache_pkt = pkt;
988 }
989 }
990 //Snooping a Coherence Request, do nothing
991 return true;
992}
993
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deferred dcache response: deliver the packet to the CPU now that
    // its clock edge has arrived (scheduled from recvTiming()).
    cpu->completeDataAccess(pkt);
}
999
1000void
1001TimingSimpleCPU::DcachePort::recvRetry()
1002{
1003 // we shouldn't get a retry unless we have a packet that we're
1004 // waiting to transmit
1005 assert(cpu->dcache_pkt != NULL);
1006 assert(cpu->_status == DcacheRetry);
1007 PacketPtr tmp = cpu->dcache_pkt;
823 if (sendTiming(tmp)) {
1008 if (tmp->senderState) {
1009 // This is a packet from a split access.
1010 SplitFragmentSenderState * send_state =
1011 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1012 assert(send_state);
1013 PacketPtr big_pkt = send_state->bigPkt;
1014
1015 SplitMainSenderState * main_send_state =
1016 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1017 assert(main_send_state);
1018
1019 if (sendTiming(tmp)) {
1020 // If we were able to send without retrying, record that fact
1021 // and try sending the other fragment.
1022 send_state->clearFromParent();
1023 int other_index = main_send_state->getPendingFragment();
1024 if (other_index > 0) {
1025 tmp = main_send_state->fragments[other_index];
1026 cpu->dcache_pkt = tmp;
1027 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1028 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1029 main_send_state->fragments[other_index] = NULL;
1030 }
1031 } else {
1032 cpu->_status = DcacheWaitResponse;
1033 // memory system takes ownership of packet
1034 cpu->dcache_pkt = NULL;
1035 }
1036 }
1037 } else if (sendTiming(tmp)) {
824 cpu->_status = DcacheWaitResponse;
825 // memory system takes ownership of packet
826 cpu->dcache_pkt = NULL;
827 }
828}
829
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-scheduling: fire at absolute time t to deliver the delayed
    // IPR response (see process()).
    cpu->schedule(this, t);
}
836
void
TimingSimpleCPU::IprEvent::process()
{
    // Delayed IPR access finished: complete it like a dcache response.
    cpu->completeDataAccess(pkt);
}
842
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name.
    return "Timing Simple CPU Delay IPR event";
}
848
849
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Forward the address-print request to the dcache port.
    dcachePort.printAddr(a);
}
855
856
857////////////////////////////////////////////////////////////////////////
858//
859// TimingSimpleCPU Simulation Object
860//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // Build a TimingSimpleCPU from these params; this model supports
    // only a single hardware thread.
    numThreads = 1;
#if !FULL_SYSTEM
    // Syscall-emulation mode: exactly one workload process per CPU.
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
1038 cpu->_status = DcacheWaitResponse;
1039 // memory system takes ownership of packet
1040 cpu->dcache_pkt = NULL;
1041 }
1042}
1043
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Self-scheduling: fire at absolute time t to deliver the delayed
    // IPR response (see process()).
    cpu->schedule(this, t);
}
1050
void
TimingSimpleCPU::IprEvent::process()
{
    // Delayed IPR access finished: complete it like a dcache response.
    cpu->completeDataAccess(pkt);
}
1056
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name.
    return "Timing Simple CPU Delay IPR event";
}
1062
1063
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Forward the address-print request to the dcache port.
    dcachePort.printAddr(a);
}
1069
1070
1071////////////////////////////////////////////////////////////////////////
1072//
1073// TimingSimpleCPU Simulation Object
1074//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // Build a TimingSimpleCPU from these params; this model supports
    // only a single hardware thread.
    numThreads = 1;
#if !FULL_SYSTEM
    // Syscall-emulation mode: exactly one workload process per CPU.
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}