// timing.cc (revision 5103:391933804192, previously 5101:8af5a6a6223d)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->readCpuId());
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    Event::schedule(t);
}

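// Construction: the CPU starts out Idle with no outstanding fetch or data
// packet; instruction fetch only begins once activateContext() schedules the
// first FetchEvent.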
TimingSimpleCPU::TimingSimpleCPU(Params *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock),
      cpu_id(p->cpu_id)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    fetchEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

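// Draining: the CPU can drain immediately only if it is not waiting on an
// outstanding cache access; otherwise it records the drain event and signals
// it from completeDrain() once the access finishes.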
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (status() == Idle || status() == Running || status() == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        // Delete the old event if it existed.
        if (fetchEvent) {
            if (fetchEvent->scheduled())
                fetchEvent->deschedule();

            delete fetchEvent;
        }

        fetchEvent = new FetchEvent(this, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent && fetchEvent->scheduled())
        fetchEvent->deschedule();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    fetchEvent = new FetchEvent(this, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

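// Timing read: build and translate a Request, wrap it in a ReadReq (or
// LoadLockedReq) packet, and either handle it locally as a memory-mapped IPR
// access (completed later by an IprEvent) or send it on the dcache port,
// moving to DcacheRetry or DcacheWaitResponse depending on whether the port
// accepted the packet.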
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpu_id, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        PacketPtr pkt =
            new Packet(req,
                       (req->isLocked() ?
                        MemCmd::LoadLockedReq : MemCmd::ReadReq),
                       Packet::Broadcast);
        pkt->dataDynamic<T>(new T);

        if (req->isMmapedIpr()) {
            Tick delay;
            delay = TheISA::handleIprRead(thread->getTC(), pkt);
            new IprEvent(pkt, this, nextCycle(curTick + delay));
            _status = DcacheWaitResponse;
            dcache_pkt = NULL;
        } else if (!dcachePort.sendTiming(pkt)) {
            _status = DcacheRetry;
            dcache_pkt = pkt;
        } else {
            _status = DcacheWaitResponse;
            // memory system takes ownership of packet
            dcache_pkt = NULL;
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");
    } else {
        delete req;
    }

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

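// Timing write: translate the request, pick the command (WriteReq by
// default, StoreCondReq for locked stores, SwapReq for swaps), and allocate
// dcache_pkt up front since completeAcc() needs it even when the access is
// suppressed by a failed store conditional.  Memory-mapped IPR writes go
// through handleIprWrite() and an IprEvent; everything else is sent on the
// dcache port.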
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpu_id, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        MemCmd cmd = MemCmd::WriteReq; // default
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
            if (req->isCondSwap()) {
                assert(res);
                req->setExtraData(*res);
            }
        }

        // Note: need to allocate dcache_pkt even if do_access is
        // false, as it's used unconditionally to call completeAcc().
        assert(dcache_pkt == NULL);
        dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
        dcache_pkt->allocate();
        dcache_pkt->set(data);

        if (do_access) {
            if (req->isMmapedIpr()) {
                Tick delay;
                dcache_pkt->set(htog(data));
                delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
                new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
                _status = DcacheWaitResponse;
                dcache_pkt = NULL;
            } else if (!dcachePort.sendTiming(dcache_pkt)) {
                _status = DcacheRetry;
            } else {
                _status = DcacheWaitResponse;
                // memory system takes ownership of packet
                dcache_pkt = NULL;
            }
        }
        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");
    } else {
        delete req;
    }


    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

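// Instruction fetch: check for pending interrupts, set up a fetch request
// for the current PC, and send a ReadReq on the icache port.  If the port
// refuses the packet the CPU waits in IcacheRetry and resends from
// recvRetry(); otherwise it waits in IcacheWaitResponse for completeIfetch().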
void
TimingSimpleCPU::fetch()
{
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    Request *ifetch_req = new Request();
    ifetch_req->setThreadContext(cpu_id, /* thread ID */ 0);
    Fault fault = setupFetchRequest(ifetch_req);

    ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    if (fault == NoFault) {
        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete ifetch_req;
        delete ifetch_pkt;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}

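// Icache response callback: execute the instruction that was just fetched.
// Memory references call initiateAcc() and normally finish later in the
// dcache response callback; all other instructions execute to completion
// here, and advanceInst() kicks off the next fetch.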
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    // received a response from the icache: execute the received
    // instruction
    assert(!pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        delete pkt->req;
        delete pkt;

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL);
                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                delete dcache_pkt->req;
                delete dcache_pkt;
                dcache_pkt = NULL;

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    }

    delete pkt->req;
    delete pkt;
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

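// Icache port timing callback: real responses are deferred to the next CPU
// clock edge via the port's tick event, nacked packets are reinitialized and
// resent (falling back to IcacheRetry if refused), and snooped coherence
// requests are ignored.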
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

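// Dcache (or IPR) response callback: finish the outstanding load or store
// via completeAcc(), update the LLSC lock flag for locked reads, and then
// either complete a pending drain or advance to the next instruction.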
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    if (pkt->isRead() && pkt->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

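// Dcache port timing callback, mirroring the icache port: defer real
// responses to the next CPU clock edge, resend nacked packets (dropping back
// to DcacheRetry if refused), and ignore snooped coherence requests.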
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

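// IprEvent models the latency of a memory-mapped IPR access: it is scheduled
// for the tick returned by handleIprRead()/handleIprWrite() and completes
// the access through completeDataAccess() just as a dcache response would.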
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t)
    : Event(&mainEventQueue), pkt(_pkt), cpu(_cpu)
{
    schedule(t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description()
{
    return "Timing Simple CPU Delay IPR event";
}

////////////////////////////////////////////////////////////////////////
//
// TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params();
    params->name = name;
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->phase = phase;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    TimingSimpleCPU *cpu = new TimingSimpleCPU(params);
    return cpu;
}