timing.cc: comparison of revisions 5710:b44dd45bd604 and 5712:199d31b47f7b. The two revisions differ only in how the CPU id is handled: the TimingSimpleCPU-local cpuId member, read from the thread context via tc->readCpuId(), is replaced by the _cpuId member inherited from BaseCPU. The listing below is the newer revision, 5712:199d31b47f7b.
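For reference, these are the lines that change between the two revisions, taken verbatim from the comparison and grouped by the function they appear in; every other line of the file is identical.

init():
    5710:  cpuId = tc->readCpuId();
    5712:  (line removed)
init(), FULL_SYSTEM loop:
    5710:  TheISA::initCPU(tc, cpuId);
    5712:  TheISA::initCPU(tc, _cpuId);
takeOverFrom():
    5710:  cpuId = tc->readCpuId();
    5712:  _cpuId = tc->cpuId();
read() and write() templates, Request constructor argument:
    5710:  cpuId, /* thread ID */ 0);
    5712:  _cpuId, /* thread ID */ 0);
translateDataReadAddr() and translateDataWriteAddr():
    5710:  new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);
    5712:  new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
fetch():
    5710:  ifetch_req->setThreadContext(cpuId, /* thread ID */ 0);
    5712:  ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
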
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Steve Reinhardt
29 */
30
31#include "arch/locked_mem.hh"
32#include "arch/mmaped_ipr.hh"
33#include "arch/utility.hh"
34#include "base/bigint.hh"
35#include "cpu/exetrace.hh"
36#include "cpu/simple/timing.hh"
37#include "mem/packet.hh"
38#include "mem/packet_access.hh"
39#include "params/TimingSimpleCPU.hh"
40#include "sim/system.hh"
41
42using namespace std;
43using namespace TheISA;
44
45Port *
46TimingSimpleCPU::getPort(const std::string &if_name, int idx)
47{
48 if (if_name == "dcache_port")
49 return &dcachePort;
50 else if (if_name == "icache_port")
51 return &icachePort;
52 else
53 panic("No Such Port\n");
54}
55
56void
57TimingSimpleCPU::init()
58{
59 BaseCPU::init();
60#if FULL_SYSTEM
61 for (int i = 0; i < threadContexts.size(); ++i) {
62 ThreadContext *tc = threadContexts[i];
63
64 // initialize CPU, including PC
65 TheISA::initCPU(tc, _cpuId);
66 }
67#endif
68}
69
70Tick
71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
72{
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
74 return curTick;
75}
76
77void
78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
79{
 80    //No internal storage to update, just return
81 return;
82}
83
84void
85TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
86{
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
96}
97
98
99void
100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
101{
102 pkt = _pkt;
103 cpu->schedule(this, t);
104}
105
106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this)
108{
109 _status = Idle;
110
111 icachePort.snoopRangeSent = false;
112 dcachePort.snoopRangeSent = false;
113
114 ifetch_pkt = dcache_pkt = NULL;
115 drainEvent = NULL;
116 previousTick = 0;
117 changeState(SimObject::Running);
118}
119
120
121TimingSimpleCPU::~TimingSimpleCPU()
122{
123}
124
125void
126TimingSimpleCPU::serialize(ostream &os)
127{
128 SimObject::State so_state = SimObject::getState();
129 SERIALIZE_ENUM(so_state);
130 BaseSimpleCPU::serialize(os);
131}
132
133void
134TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
135{
136 SimObject::State so_state;
137 UNSERIALIZE_ENUM(so_state);
138 BaseSimpleCPU::unserialize(cp, section);
139}
140
141unsigned int
142TimingSimpleCPU::drain(Event *drain_event)
143{
144 // TimingSimpleCPU is ready to drain if it's not waiting for
145 // an access to complete.
146 if (_status == Idle || _status == Running || _status == SwitchedOut) {
147 changeState(SimObject::Drained);
148 return 0;
149 } else {
150 changeState(SimObject::Draining);
151 drainEvent = drain_event;
152 return 1;
153 }
154}
155
156void
157TimingSimpleCPU::resume()
158{
159 DPRINTF(SimpleCPU, "Resume\n");
160 if (_status != SwitchedOut && _status != Idle) {
161 assert(system->getMemoryMode() == Enums::timing);
162
163 if (fetchEvent.scheduled())
164 deschedule(fetchEvent);
165
166 schedule(fetchEvent, nextCycle());
167 }
168
169 changeState(SimObject::Running);
170}
171
172void
173TimingSimpleCPU::switchOut()
174{
175 assert(_status == Running || _status == Idle);
176 _status = SwitchedOut;
177 numCycles += tickToCycles(curTick - previousTick);
178
179 // If we've been scheduled to resume but are then told to switch out,
180 // we'll need to cancel it.
181 if (fetchEvent.scheduled())
182 deschedule(fetchEvent);
183}
184
185
186void
187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
188{
189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
190
191 // if any of this CPU's ThreadContexts are active, mark the CPU as
192 // running and schedule its tick event.
193 for (int i = 0; i < threadContexts.size(); ++i) {
194 ThreadContext *tc = threadContexts[i];
195 if (tc->status() == ThreadContext::Active && _status != Running) {
196 _status = Running;
197 break;
198 }
199 }
200
201 if (_status != Running) {
202 _status = Idle;
203 }
204 assert(threadContexts.size() == 1);
205 _cpuId = tc->cpuId();
206 previousTick = curTick;
207}
208
209
210void
211TimingSimpleCPU::activateContext(int thread_num, int delay)
212{
213 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
214
215 assert(thread_num == 0);
216 assert(thread);
217
218 assert(_status == Idle);
219
220 notIdleFraction++;
221 _status = Running;
222
223 // kick things off by initiating the fetch of the next instruction
224 schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
225}
226
227
228void
229TimingSimpleCPU::suspendContext(int thread_num)
230{
231 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
232
233 assert(thread_num == 0);
234 assert(thread);
235
236 assert(_status == Running);
237
238 // just change status to Idle... if status != Running,
239 // completeInst() will not initiate fetch of next instruction.
240
241 notIdleFraction--;
242 _status = Idle;
243}
244
245
246template <class T>
247Fault
248TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
249{
250 Request *req =
251 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
252 _cpuId, /* thread ID */ 0);
253
254 if (traceData) {
255 traceData->setAddr(req->getVaddr());
256 }
257
258 // translate to physical address
259 Fault fault = thread->translateDataReadReq(req);
260
261 // Now do the access.
262 if (fault == NoFault) {
263 PacketPtr pkt =
264 new Packet(req,
265 (req->isLocked() ?
266 MemCmd::LoadLockedReq : MemCmd::ReadReq),
267 Packet::Broadcast);
268 pkt->dataDynamic<T>(new T);
269
270 if (req->isMmapedIpr()) {
271 Tick delay;
272 delay = TheISA::handleIprRead(thread->getTC(), pkt);
273 new IprEvent(pkt, this, nextCycle(curTick + delay));
274 _status = DcacheWaitResponse;
275 dcache_pkt = NULL;
276 } else if (!dcachePort.sendTiming(pkt)) {
277 _status = DcacheRetry;
278 dcache_pkt = pkt;
279 } else {
280 _status = DcacheWaitResponse;
281 // memory system takes ownership of packet
282 dcache_pkt = NULL;
283 }
284
285 // This will need a new way to tell if it has a dcache attached.
286 if (req->isUncacheable())
287 recordEvent("Uncached Read");
288 } else {
289 delete req;
290 }
291
292 if (traceData) {
293 traceData->setData(data);
294 }
295 return fault;
296}
297
298Fault
299TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
300 int size, unsigned flags)
301{
302 Request *req =
303 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
304
305 if (traceData) {
306 traceData->setAddr(vaddr);
307 }
308
 309    Fault fault = thread->translateDataReadReq(req);
310
311 if (fault == NoFault)
312 paddr = req->getPaddr();
313
314 delete req;
315 return fault;
316}
317
318#ifndef DOXYGEN_SHOULD_SKIP_THIS
319
320template
321Fault
322TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);
323
324template
325Fault
326TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);
327
328template
329Fault
330TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
331
332template
333Fault
334TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
335
336template
337Fault
338TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
339
340template
341Fault
342TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
343
344#endif //DOXYGEN_SHOULD_SKIP_THIS
345
346template<>
347Fault
348TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
349{
350 return read(addr, *(uint64_t*)&data, flags);
351}
352
353template<>
354Fault
355TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
356{
357 return read(addr, *(uint32_t*)&data, flags);
358}
359
360
361template<>
362Fault
363TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
364{
365 return read(addr, (uint32_t&)data, flags);
366}
367
368
369template <class T>
370Fault
371TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
372{
373 Request *req =
374 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
375 _cpuId, /* thread ID */ 0);
376
377 if (traceData) {
378 traceData->setAddr(req->getVaddr());
379 }
380
381 // translate to physical address
382 Fault fault = thread->translateDataWriteReq(req);
383
384 // Now do the access.
385 if (fault == NoFault) {
386 MemCmd cmd = MemCmd::WriteReq; // default
387 bool do_access = true; // flag to suppress cache access
388
389 if (req->isLocked()) {
390 cmd = MemCmd::StoreCondReq;
391 do_access = TheISA::handleLockedWrite(thread, req);
392 } else if (req->isSwap()) {
393 cmd = MemCmd::SwapReq;
394 if (req->isCondSwap()) {
395 assert(res);
396 req->setExtraData(*res);
397 }
398 }
399
400 // Note: need to allocate dcache_pkt even if do_access is
401 // false, as it's used unconditionally to call completeAcc().
402 assert(dcache_pkt == NULL);
403 dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
404 dcache_pkt->allocate();
405 dcache_pkt->set(data);
406
407 if (do_access) {
408 if (req->isMmapedIpr()) {
409 Tick delay;
410 dcache_pkt->set(htog(data));
411 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
412 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
413 _status = DcacheWaitResponse;
414 dcache_pkt = NULL;
415 } else if (!dcachePort.sendTiming(dcache_pkt)) {
416 _status = DcacheRetry;
417 } else {
418 _status = DcacheWaitResponse;
419 // memory system takes ownership of packet
420 dcache_pkt = NULL;
421 }
422 }
423 // This will need a new way to tell if it's hooked up to a cache or not.
424 if (req->isUncacheable())
425 recordEvent("Uncached Write");
426 } else {
427 delete req;
428 }
429
430 if (traceData) {
431 traceData->setData(data);
432 }
433
434 // If the write needs to have a fault on the access, consider calling
435 // changeStatus() and changing it to "bad addr write" or something.
436 return fault;
437}
438
439Fault
440TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
441 int size, unsigned flags)
442{
443 Request *req =
444 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
445
446 if (traceData) {
447 traceData->setAddr(vaddr);
448 }
449
450 Fault fault = thread->translateDataWriteReq(req);
451
452 if (fault == NoFault)
453 paddr = req->getPaddr();
454
455 delete req;
456 return fault;
457}
458
459
460#ifndef DOXYGEN_SHOULD_SKIP_THIS
461template
462Fault
463TimingSimpleCPU::write(Twin32_t data, Addr addr,
464 unsigned flags, uint64_t *res);
465
466template
467Fault
468TimingSimpleCPU::write(Twin64_t data, Addr addr,
469 unsigned flags, uint64_t *res);
470
471template
472Fault
473TimingSimpleCPU::write(uint64_t data, Addr addr,
474 unsigned flags, uint64_t *res);
475
476template
477Fault
478TimingSimpleCPU::write(uint32_t data, Addr addr,
479 unsigned flags, uint64_t *res);
480
481template
482Fault
483TimingSimpleCPU::write(uint16_t data, Addr addr,
484 unsigned flags, uint64_t *res);
485
486template
487Fault
488TimingSimpleCPU::write(uint8_t data, Addr addr,
489 unsigned flags, uint64_t *res);
490
491#endif //DOXYGEN_SHOULD_SKIP_THIS
492
493template<>
494Fault
495TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
496{
497 return write(*(uint64_t*)&data, addr, flags, res);
498}
499
500template<>
501Fault
502TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
503{
504 return write(*(uint32_t*)&data, addr, flags, res);
505}
506
507
508template<>
509Fault
510TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
511{
512 return write((uint32_t)data, addr, flags, res);
513}
514
515
516void
517TimingSimpleCPU::fetch()
518{
519 DPRINTF(SimpleCPU, "Fetch\n");
520
521 if (!curStaticInst || !curStaticInst->isDelayedCommit())
522 checkForInterrupts();
523
524 checkPcEventQueue();
525
526 bool fromRom = isRomMicroPC(thread->readMicroPC());
527
528 if (!fromRom) {
529 Request *ifetch_req = new Request();
530 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
531 Fault fault = setupFetchRequest(ifetch_req);
532
533 ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
534 ifetch_pkt->dataStatic(&inst);
535
536 if (fault == NoFault) {
537 if (!icachePort.sendTiming(ifetch_pkt)) {
538 // Need to wait for retry
539 _status = IcacheRetry;
540 } else {
541 // Need to wait for cache to respond
542 _status = IcacheWaitResponse;
543 // ownership of packet transferred to memory system
544 ifetch_pkt = NULL;
545 }
546 } else {
547 delete ifetch_req;
548 delete ifetch_pkt;
549 // fetch fault: advance directly to next instruction (fault handler)
550 advanceInst(fault);
551 }
552 } else {
553 _status = IcacheWaitResponse;
554 completeIfetch(NULL);
555 }
556
557 numCycles += tickToCycles(curTick - previousTick);
558 previousTick = curTick;
559}
560
561
562void
563TimingSimpleCPU::advanceInst(Fault fault)
564{
565 advancePC(fault);
566
567 if (_status == Running) {
568 // kick off fetch of next instruction... callback from icache
569 // response will cause that instruction to be executed,
570 // keeping the CPU running.
571 fetch();
572 }
573}
574
575
576void
577TimingSimpleCPU::completeIfetch(PacketPtr pkt)
578{
579 DPRINTF(SimpleCPU, "Complete ICache Fetch\n");
580
581 // received a response from the icache: execute the received
582 // instruction
583
584 assert(!pkt || !pkt->isError());
585 assert(_status == IcacheWaitResponse);
586
587 _status = Running;
588
589 numCycles += tickToCycles(curTick - previousTick);
590 previousTick = curTick;
591
592 if (getState() == SimObject::Draining) {
593 if (pkt) {
594 delete pkt->req;
595 delete pkt;
596 }
597
598 completeDrain();
599 return;
600 }
601
602 preExecute();
603 if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
604 // load or store: just send to dcache
605 Fault fault = curStaticInst->initiateAcc(this, traceData);
606 if (_status != Running) {
607 // instruction will complete in dcache response callback
608 assert(_status == DcacheWaitResponse || _status == DcacheRetry);
609 assert(fault == NoFault);
610 } else {
611 if (fault == NoFault) {
612 // Note that ARM can have NULL packets if the instruction gets
613 // squashed due to predication
614 // early fail on store conditional: complete now
615 assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);
616
617 fault = curStaticInst->completeAcc(dcache_pkt, this,
618 traceData);
619 if (dcache_pkt != NULL)
620 {
621 delete dcache_pkt->req;
622 delete dcache_pkt;
623 dcache_pkt = NULL;
624 }
625
626 // keep an instruction count
627 if (fault == NoFault)
628 countInst();
629 } else if (traceData) {
630 // If there was a fault, we shouldn't trace this instruction.
631 delete traceData;
632 traceData = NULL;
633 }
634
635 postExecute();
636 // @todo remove me after debugging with legion done
637 if (curStaticInst && (!curStaticInst->isMicroop() ||
638 curStaticInst->isFirstMicroop()))
639 instCnt++;
640 advanceInst(fault);
641 }
642 } else {
643 // non-memory instruction: execute completely now
644 Fault fault = curStaticInst->execute(this, traceData);
645
646 // keep an instruction count
647 if (fault == NoFault)
648 countInst();
649 else if (traceData) {
650 // If there was a fault, we shouldn't trace this instruction.
651 delete traceData;
652 traceData = NULL;
653 }
654
655 postExecute();
656 // @todo remove me after debugging with legion done
657 if (curStaticInst && (!curStaticInst->isMicroop() ||
658 curStaticInst->isFirstMicroop()))
659 instCnt++;
660 advanceInst(fault);
661 }
662
663 if (pkt) {
664 delete pkt->req;
665 delete pkt;
666 }
667}
668
669void
670TimingSimpleCPU::IcachePort::ITickEvent::process()
671{
672 cpu->completeIfetch(pkt);
673}
674
675bool
676TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
677{
678 if (pkt->isResponse() && !pkt->wasNacked()) {
679 // delay processing of returned data until next CPU clock edge
680 Tick next_tick = cpu->nextCycle(curTick);
681
682 if (next_tick == curTick)
683 cpu->completeIfetch(pkt);
684 else
685 tickEvent.schedule(pkt, next_tick);
686
687 return true;
688 }
689 else if (pkt->wasNacked()) {
690 assert(cpu->_status == IcacheWaitResponse);
691 pkt->reinitNacked();
692 if (!sendTiming(pkt)) {
693 cpu->_status = IcacheRetry;
694 cpu->ifetch_pkt = pkt;
695 }
696 }
697 //Snooping a Coherence Request, do nothing
698 return true;
699}
700
701void
702TimingSimpleCPU::IcachePort::recvRetry()
703{
704 // we shouldn't get a retry unless we have a packet that we're
705 // waiting to transmit
706 assert(cpu->ifetch_pkt != NULL);
707 assert(cpu->_status == IcacheRetry);
708 PacketPtr tmp = cpu->ifetch_pkt;
709 if (sendTiming(tmp)) {
710 cpu->_status = IcacheWaitResponse;
711 cpu->ifetch_pkt = NULL;
712 }
713}
714
715void
716TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
717{
718 // received a response from the dcache: complete the load or store
719 // instruction
720 assert(!pkt->isError());
721 assert(_status == DcacheWaitResponse);
722 _status = Running;
723
724 numCycles += tickToCycles(curTick - previousTick);
725 previousTick = curTick;
726
727 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
728
729 // keep an instruction count
730 if (fault == NoFault)
731 countInst();
732 else if (traceData) {
733 // If there was a fault, we shouldn't trace this instruction.
734 delete traceData;
735 traceData = NULL;
736 }
737
738 // the locked flag may be cleared on the response packet, so check
739 // pkt->req and not pkt to see if it was a load-locked
740 if (pkt->isRead() && pkt->req->isLocked()) {
741 TheISA::handleLockedRead(thread, pkt->req);
742 }
743
744 delete pkt->req;
745 delete pkt;
746
747 postExecute();
748
749 if (getState() == SimObject::Draining) {
750 advancePC(fault);
751 completeDrain();
752
753 return;
754 }
755
756 advanceInst(fault);
757}
758
759
760void
761TimingSimpleCPU::completeDrain()
762{
763 DPRINTF(Config, "Done draining\n");
764 changeState(SimObject::Drained);
765 drainEvent->process();
766}
767
768void
769TimingSimpleCPU::DcachePort::setPeer(Port *port)
770{
771 Port::setPeer(port);
772
773#if FULL_SYSTEM
774 // Update the ThreadContext's memory ports (Functional/Virtual
775 // Ports)
776 cpu->tcBase()->connectMemPorts(cpu->tcBase());
777#endif
778}
779
780bool
781TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
782{
783 if (pkt->isResponse() && !pkt->wasNacked()) {
784 // delay processing of returned data until next CPU clock edge
785 Tick next_tick = cpu->nextCycle(curTick);
786
787 if (next_tick == curTick)
788 cpu->completeDataAccess(pkt);
789 else
790 tickEvent.schedule(pkt, next_tick);
791
792 return true;
793 }
794 else if (pkt->wasNacked()) {
795 assert(cpu->_status == DcacheWaitResponse);
796 pkt->reinitNacked();
797 if (!sendTiming(pkt)) {
798 cpu->_status = DcacheRetry;
799 cpu->dcache_pkt = pkt;
800 }
801 }
802 //Snooping a Coherence Request, do nothing
803 return true;
804}
805
806void
807TimingSimpleCPU::DcachePort::DTickEvent::process()
808{
809 cpu->completeDataAccess(pkt);
810}
811
812void
813TimingSimpleCPU::DcachePort::recvRetry()
814{
815 // we shouldn't get a retry unless we have a packet that we're
816 // waiting to transmit
817 assert(cpu->dcache_pkt != NULL);
818 assert(cpu->_status == DcacheRetry);
819 PacketPtr tmp = cpu->dcache_pkt;
820 if (sendTiming(tmp)) {
821 cpu->_status = DcacheWaitResponse;
822 // memory system takes ownership of packet
823 cpu->dcache_pkt = NULL;
824 }
825}
826
827TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
828 Tick t)
829 : pkt(_pkt), cpu(_cpu)
830{
831 cpu->schedule(this, t);
832}
833
834void
835TimingSimpleCPU::IprEvent::process()
836{
837 cpu->completeDataAccess(pkt);
838}
839
840const char *
841TimingSimpleCPU::IprEvent::description() const
842{
843 return "Timing Simple CPU Delay IPR event";
844}
845
846
847void
848TimingSimpleCPU::printAddr(Addr a)
849{
850 dcachePort.printAddr(a);
851}
852
853
854////////////////////////////////////////////////////////////////////////
855//
856// TimingSimpleCPU Simulation Object
857//
858TimingSimpleCPU *
859TimingSimpleCPUParams::create()
860{
861 numThreads = 1;
862#if !FULL_SYSTEM
863 if (workload.size() != 1)
864 panic("only one workload allowed");
865#endif
866 return new TimingSimpleCPU(this);
867}