timing.cc (4881:3e4b4f6ff9dd)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "sim/builder.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

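// Port lookup used while the simulated system is wired up: the
// configuration/connection code typically asks each SimObject for its
// ports by name, so the strings accepted here ("dcache_port",
// "icache_port") have to match the port names used in the CPU's
// configuration description.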
Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->readCpuId());
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    Event::schedule(t);
}

TimingSimpleCPU::TimingSimpleCPU(Params *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock),
      cpu_id(p->cpu_id)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    fetchEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

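// Draining protocol (used for checkpointing and CPU switching): drain()
// returns 0 and goes straight to Drained if no memory access is in
// flight; otherwise it returns 1, remembers the caller's event, and
// completeDrain() signals that event once the outstanding icache/dcache
// response has been consumed.  resume() undoes this by rescheduling the
// fetch event on the next CPU clock edge.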
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (status() == Idle || status() == Running || status() == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == System::Timing);

        // Delete the old event if it existed.
        if (fetchEvent) {
            if (fetchEvent->scheduled())
                fetchEvent->deschedule();

            delete fetchEvent;
        }

        fetchEvent = new FetchEvent(this, nextCycle());
    }

    changeState(SimObject::Running);
    previousTick = curTick;
}

void
TimingSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;
    numCycles += curTick - previousTick;

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent && fetchEvent->scheduled())
        fetchEvent->deschedule();
}


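// CPU switching: takeOverFrom() inherits state (and the cache ports) from
// the CPU being replaced, then marks this CPU Running if any migrated
// thread context is still Active, and Idle otherwise.  Note that no fetch
// event is scheduled here; fetching typically restarts later, via
// resume() or activateContext().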
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    fetchEvent = new FetchEvent(this, nextCycle(curTick + cycles(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}


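// Timing read: build a Request, translate it, wrap it in a ReadReq (or
// LoadLockedReq for LL/SC) packet and push it out the dcache port.  If
// sendTiming() is refused, the packet is parked in dcache_pkt and resent
// from DcachePort::recvRetry(); otherwise the CPU sits in
// DcacheWaitResponse until completeDataAccess() is called with the reply.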
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpu_id, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        PacketPtr pkt =
            new Packet(req,
                       (req->isLocked() ?
                        MemCmd::LoadLockedReq : MemCmd::ReadReq),
                       Packet::Broadcast);
        pkt->dataDynamic<T>(new T);

        if (!dcachePort.sendTiming(pkt)) {
            _status = DcacheRetry;
            dcache_pkt = pkt;
        } else {
            _status = DcacheWaitResponse;
            // memory system takes ownership of packet
            dcache_pkt = NULL;
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");
    } else {
        delete req;
    }

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


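// Timing write: same structure as read(), but the command depends on the
// request type -- StoreCondReq for store-conditionals (where
// handleLockedWrite() may clear do_access if the SC has already failed
// locally) and SwapReq for swaps/conditional swaps.  The packet is
// allocated even when do_access is false because completeAcc() still
// needs it to report the outcome of the failed store-conditional.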
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpu_id, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        MemCmd cmd = MemCmd::WriteReq; // default
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
            if (req->isCondSwap()) {
                assert(res);
                req->setExtraData(*res);
            }
        }

        // Note: need to allocate dcache_pkt even if do_access is
        // false, as it's used unconditionally to call completeAcc().
        assert(dcache_pkt == NULL);
        dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
        dcache_pkt->allocate();
        dcache_pkt->set(data);

        if (do_access) {
            if (!dcachePort.sendTiming(dcache_pkt)) {
                _status = DcacheRetry;
            } else {
                _status = DcacheWaitResponse;
                // memory system takes ownership of packet
                dcache_pkt = NULL;
            }
        }
        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");
    } else {
        delete req;
    }


    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


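// Instruction fetch: build a fresh ifetch request, translate it via
// setupFetchRequest(), and send a ReadReq on the icache port.  On a
// translation fault the packet is thrown away and advanceInst() steers
// the CPU straight into the fault handler instead of waiting for memory.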
void
TimingSimpleCPU::fetch()
{
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    Request *ifetch_req = new Request();
    ifetch_req->setThreadContext(cpu_id, /* thread ID */ 0);
    Fault fault = setupFetchRequest(ifetch_req);

    ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    if (fault == NoFault) {
        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete ifetch_req;
        delete ifetch_pkt;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += curTick - previousTick;
    previousTick = curTick;
}


void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


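// Icache response path: once the fetched instruction arrives it is
// decoded and executed.  Memory references call initiateAcc(), which
// normally leaves the CPU in DcacheWaitResponse/DcacheRetry; if the
// access finishes immediately (e.g. a store-conditional that already
// failed in handleLockedWrite()), completeAcc() is invoked right here.
// Everything else executes in one shot and advanceInst() starts the next
// fetch.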
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    // received a response from the icache: execute the received
    // instruction
    assert(!pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += curTick - previousTick;
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        delete pkt->req;
        delete pkt;

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL);
                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                delete dcache_pkt->req;
                delete dcache_pkt;
                dcache_pkt = NULL;
            }
            postExecute();
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);
        postExecute();
        advanceInst(fault);
    }

    delete pkt->req;
    delete pkt;
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

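// Port callbacks from the icache.  Responses are deferred to the next CPU
// clock edge via the tick event so data never appears to arrive mid-cycle;
// nacked packets are reinitialised and resent (or parked for recvRetry()
// if the resend is refused); anything else is a snooped coherence request
// and is ignored.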
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request: do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

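// Dcache response path: completeAcc() writes back the loaded data (or the
// store-conditional result), load-locked reads update the lock flag via
// handleLockedRead(), and then the CPU either finishes draining or moves
// on to the next instruction.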
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += curTick - previousTick;
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    if (pkt->isRead() && pkt->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request: do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}


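// Simulation-object glue: the BEGIN/END macro pairs from sim/builder.hh
// declare and describe the parameters that the configuration scripts can
// set on a TimingSimpleCPU, and CREATE_SIM_OBJECT copies them into a
// Params struct before instantiating the CPU itself.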
////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(TimingSimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    Param<Tick> progress_interval;
    SimObjectParam<System *> system;
    Param<int> cpu_id;

#if FULL_SYSTEM
    SimObjectParam<TheISA::ITB *> itb;
    SimObjectParam<TheISA::DTB *> dtb;
    Param<Tick> profile;

    Param<bool> do_quiesce;
    Param<bool> do_checkpoint_insts;
    Param<bool> do_statistics_insts;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    Param<int> phase;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;
    Param<bool> simulate_stalls;

END_DECLARE_SIM_OBJECT_PARAMS(TimingSimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(TimingSimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(progress_interval, "Progress interval"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(profile, ""),
    INIT_PARAM(do_quiesce, ""),
    INIT_PARAM(do_checkpoint_insts, ""),
    INIT_PARAM(do_statistics_insts, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM_DFLT(phase, "clock phase", 0),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace"),
    INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")

END_INIT_SIM_OBJECT_PARAMS(TimingSimpleCPU)


CREATE_SIM_OBJECT(TimingSimpleCPU)
{
    TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->phase = phase;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->system = system;
    params->cpu_id = cpu_id;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    params->process = workload;
#endif

    TimingSimpleCPU *cpu = new TimingSimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("TimingSimpleCPU", TimingSimpleCPU)