atomic.cc — revision 2640:266b80dd5eca vs. revision 2641:6d9d837e2032
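Revision 2641 constructs the CPU's packets with the Packet(Request *, Command, destination) constructor instead of filling in cmd, req, size, and dest field by field, refreshes the data packets with reinitFromRequest() before each access, and references the result constant as Packet::Success.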
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "arch/utility.hh"
30#include "cpu/exetrace.hh"
31#include "cpu/simple/atomic.hh"
32#include "mem/packet_impl.hh"
33#include "sim/builder.hh"
34
35using namespace std;
36using namespace TheISA;
37
38AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
39 : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
40{
41}
42
43
44void
45AtomicSimpleCPU::TickEvent::process()
46{
47 cpu->tick();
48}
49
50const char *
51AtomicSimpleCPU::TickEvent::description()
52{
53 return "AtomicSimpleCPU tick event";
54}
55
56
57void
58AtomicSimpleCPU::init()
59{
60 // Create memory ports (connect them up)
61 Port *mem_dport = mem->getPort("");
62 dcachePort.setPeer(mem_dport);
63 mem_dport->setPeer(&dcachePort);
64
65 Port *mem_iport = mem->getPort("");
66 icachePort.setPeer(mem_iport);
67 mem_iport->setPeer(&icachePort);
68
69 BaseCPU::init();
70#if FULL_SYSTEM
71 for (int i = 0; i < execContexts.size(); ++i) {
72 ExecContext *xc = execContexts[i];
73
74 // initialize CPU, including PC
75 TheISA::initCPU(xc, xc->readCpuId());
76 }
77#endif
78}
79
80bool
81AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
82{
83 panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
84 return true;
85}
86
87Tick
88AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
89{
90 panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
91 return curTick;
92}
93
94void
95AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
96{
97 panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
98}
99
100void
101AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
102{
103 if (status == RangeChange)
104 return;
105
106 panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
107}
108
109Packet *
110AtomicSimpleCPU::CpuPort::recvRetry()
111{
112 panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
113 return NULL;
114}
115
116
117AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
118 : BaseSimpleCPU(p), tickEvent(this),
119 width(p->width), simulate_stalls(p->simulate_stalls),
120 icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this)
121{
122 _status = Idle;
123
124 ifetch_req = new Request(true);
125 ifetch_req->setAsid(0);
126 // @todo fix me and get the real cpu ID!!!
127 ifetch_req->setCpuNum(0);
128 ifetch_req->setSize(sizeof(MachInst));
// revision 2640:
129 ifetch_pkt = new Packet;
130 ifetch_pkt->cmd = Read;
131 ifetch_pkt->dataStatic(&inst);
132 ifetch_pkt->req = ifetch_req;
133 ifetch_pkt->size = sizeof(MachInst);
134 ifetch_pkt->dest = Packet::Broadcast;
// revision 2641:
129 ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
130 ifetch_pkt->dataStatic(&inst);
135
136 data_read_req = new Request(true);
137 // @todo fix me and get the real cpu ID!!!
138 data_read_req->setCpuNum(0);
139 data_read_req->setAsid(0);
// revision 2640:
140 data_read_pkt = new Packet;
141 data_read_pkt->cmd = Read;
142 data_read_pkt->dataStatic(&dataReg);
143 data_read_pkt->req = data_read_req;
144 data_read_pkt->dest = Packet::Broadcast;
// revision 2641:
136 data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
137 Packet::Broadcast);
138 data_read_pkt->dataStatic(&dataReg);
145
146 data_write_req = new Request(true);
147 // @todo fix me and get the real cpu ID!!!
148 data_write_req->setCpuNum(0);
149 data_write_req->setAsid(0);
// revision 2640:
150 data_write_pkt = new Packet;
151 data_write_pkt->cmd = Write;
152 data_write_pkt->req = data_write_req;
153 data_write_pkt->dest = Packet::Broadcast;
// revision 2641:
144 data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
145 Packet::Broadcast);
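// Note: revision 2641 no longer sets ifetch_pkt->size explicitly; the fetch size
// presumably comes from ifetch_req, which is set above via setSize(sizeof(MachInst)).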
154}
155
156
157AtomicSimpleCPU::~AtomicSimpleCPU()
158{
159}
160
161void
162AtomicSimpleCPU::serialize(ostream &os)
163{
164 BaseSimpleCPU::serialize(os);
165 SERIALIZE_ENUM(_status);
166 nameOut(os, csprintf("%s.tickEvent", name()));
167 tickEvent.serialize(os);
168}
169
170void
171AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
172{
173 BaseSimpleCPU::unserialize(cp, section);
174 UNSERIALIZE_ENUM(_status);
175 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
176}
177
178void
179AtomicSimpleCPU::switchOut(Sampler *s)
180{
181 sampler = s;
182 if (status() == Running) {
183 _status = SwitchedOut;
184
185 tickEvent.squash();
186 }
187 sampler->signalSwitched();
188}
189
190
191void
192AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
193{
194 BaseCPU::takeOverFrom(oldCPU);
195
196 assert(!tickEvent.scheduled());
197
198 // if any of this CPU's ExecContexts are active, mark the CPU as
199 // running and schedule its tick event.
200 for (int i = 0; i < execContexts.size(); ++i) {
201 ExecContext *xc = execContexts[i];
202 if (xc->status() == ExecContext::Active && _status != Running) {
203 _status = Running;
204 tickEvent.schedule(curTick);
205 break;
206 }
207 }
208}
209
210
211void
212AtomicSimpleCPU::activateContext(int thread_num, int delay)
213{
214 assert(thread_num == 0);
215 assert(cpuXC);
216
217 assert(_status == Idle);
218 assert(!tickEvent.scheduled());
219
220 notIdleFraction++;
221 tickEvent.schedule(curTick + cycles(delay));
222 _status = Running;
223}
224
225
226void
227AtomicSimpleCPU::suspendContext(int thread_num)
228{
229 assert(thread_num == 0);
230 assert(cpuXC);
231
232 assert(_status == Running);
233
234 // tick event may not be scheduled if this gets called from inside
235 // an instruction's execution, e.g. "quiesce"
236 if (tickEvent.scheduled())
237 tickEvent.deschedule();
238
239 notIdleFraction--;
240 _status = Idle;
241}
242
243
244template <class T>
245Fault
246AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
247{
248 data_read_req->setVaddr(addr);
249 data_read_req->setSize(sizeof(T));
250 data_read_req->setFlags(flags);
251 data_read_req->setTime(curTick);
252
253 if (traceData) {
254 traceData->setAddr(addr);
255 }
256
257 // translate to physical address
258 Fault fault = cpuXC->translateDataReadReq(data_read_req);
259
260 // Now do the access.
261 if (fault == NoFault) {
262 data_read_pkt->reset();
// revision 2640:
263 data_read_pkt->addr = data_read_req->getPaddr();
264 data_read_pkt->size = sizeof(T);
// revision 2641:
255 data_read_pkt->reinitFromRequest();
265
266 dcache_complete = dcachePort.sendAtomic(data_read_pkt);
267 dcache_access = true;
268
// revision 2640:
269 assert(data_read_pkt->result == Success);
// revision 2641:
260 assert(data_read_pkt->result == Packet::Success);
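// reinitFromRequest() appears to refresh the packet's address and size from
// data_read_req, replacing the explicit addr/size assignments used in 2640;
// the Success result code is now scoped as Packet::Success.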
270 data = data_read_pkt->get<T>();
271
272 }
273
274 // This will need a new way to tell if it has a dcache attached.
275 if (data_read_req->getFlags() & UNCACHEABLE)
276 recordEvent("Uncached Read");
277
278 return fault;
279}
280
281#ifndef DOXYGEN_SHOULD_SKIP_THIS
282
283template
284Fault
285AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
286
287template
288Fault
289AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
290
291template
292Fault
293AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
294
295template
296Fault
297AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
298
299#endif //DOXYGEN_SHOULD_SKIP_THIS
300
301template<>
302Fault
303AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
304{
305 return read(addr, *(uint64_t*)&data, flags);
306}
307
308template<>
309Fault
310AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
311{
312 return read(addr, *(uint32_t*)&data, flags);
313}
314
315
316template<>
317Fault
318AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
319{
320 return read(addr, (uint32_t&)data, flags);
321}
322
323
324template <class T>
325Fault
326AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
327{
328 data_write_req->setVaddr(addr);
329 data_write_req->setTime(curTick);
330 data_write_req->setSize(sizeof(T));
331 data_write_req->setFlags(flags);
332
333 if (traceData) {
334 traceData->setAddr(addr);
335 }
336
337 // translate to physical address
338 Fault fault = cpuXC->translateDataWriteReq(data_write_req);
339
340 // Now do the access.
341 if (fault == NoFault) {
342 data_write_pkt->reset();
343 data = htog(data);
344 data_write_pkt->dataStatic(&data);
// revision 2640:
345 data_write_pkt->addr = data_write_req->getPaddr();
346 data_write_pkt->size = sizeof(T);
// revision 2641:
336 data_write_pkt->reinitFromRequest();
347
348 dcache_complete = dcachePort.sendAtomic(data_write_pkt);
349 dcache_access = true;
350
// revision 2640:
351 assert(data_write_pkt->result == Success);
// revision 2641:
341 assert(data_write_pkt->result == Packet::Success);
352
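// If this was a store-conditional access (LOCKED flag), hand its result
// back to the caller through *res.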
353 if (res && data_write_req->getFlags() & LOCKED) {
354 *res = data_write_req->getScResult();
355 }
356 }
357
358 // This will need a new way to tell if it's hooked up to a cache or not.
359 if (data_write_req->getFlags() & UNCACHEABLE)
360 recordEvent("Uncached Write");
361
362 // If the write needs to have a fault on the access, consider calling
363 // changeStatus() and changing it to "bad addr write" or something.
364 return fault;
365}
366
367
368#ifndef DOXYGEN_SHOULD_SKIP_THIS
369template
370Fault
371AtomicSimpleCPU::write(uint64_t data, Addr addr,
372 unsigned flags, uint64_t *res);
373
374template
375Fault
376AtomicSimpleCPU::write(uint32_t data, Addr addr,
377 unsigned flags, uint64_t *res);
378
379template
380Fault
381AtomicSimpleCPU::write(uint16_t data, Addr addr,
382 unsigned flags, uint64_t *res);
383
384template
385Fault
386AtomicSimpleCPU::write(uint8_t data, Addr addr,
387 unsigned flags, uint64_t *res);
388
389#endif //DOXYGEN_SHOULD_SKIP_THIS
390
391template<>
392Fault
393AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
394{
395 return write(*(uint64_t*)&data, addr, flags, res);
396}
397
398template<>
399Fault
400AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
401{
402 return write(*(uint32_t*)&data, addr, flags, res);
403}
404
405
406template<>
407Fault
408AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
409{
410 return write((uint32_t)data, addr, flags, res);
411}
412
413
414void
415AtomicSimpleCPU::tick()
416{
417 Tick latency = cycles(1); // instruction takes one cycle by default
418
419 for (int i = 0; i < width; ++i) {
420 numCycles++;
421
422 checkForInterrupts();
423
424 ifetch_req->resetMin();
425 ifetch_pkt->reset();
426 Fault fault = setupFetchPacket(ifetch_pkt);
427
428 if (fault == NoFault) {
429 Tick icache_complete = icachePort.sendAtomic(ifetch_pkt);
430 // ifetch_req is initialized to read the instruction directly
431 // into the CPU object's inst field.
432
433 dcache_access = false; // assume no dcache access
434 preExecute();
435 fault = curStaticInst->execute(this, traceData);
436 postExecute();
437
438 if (traceData) {
439 traceData->finalize();
440 }
441
442 if (simulate_stalls) {
443 // This calculation assumes that the icache and dcache
444 // access latencies are always a multiple of the CPU's
445 // cycle time. If not, the next tick event may get
446 // scheduled at a non-integer multiple of the CPU
447 // cycle time.
448 Tick icache_stall = icache_complete - curTick - cycles(1);
449 Tick dcache_stall =
450 dcache_access ? dcache_complete - curTick - cycles(1) : 0;
451 latency += icache_stall + dcache_stall;
452 }
453
454 }
455
456 advancePC(fault);
457 }
458
459 if (_status != Idle)
460 tickEvent.schedule(curTick + latency);
461}
462
463
464////////////////////////////////////////////////////////////////////////
465//
466// AtomicSimpleCPU Simulation Object
467//
468BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
469
470 Param<Counter> max_insts_any_thread;
471 Param<Counter> max_insts_all_threads;
472 Param<Counter> max_loads_any_thread;
473 Param<Counter> max_loads_all_threads;
474 SimObjectParam<MemObject *> mem;
475
476#if FULL_SYSTEM
477 SimObjectParam<AlphaITB *> itb;
478 SimObjectParam<AlphaDTB *> dtb;
479 SimObjectParam<System *> system;
480 Param<int> cpu_id;
481 Param<Tick> profile;
482#else
483 SimObjectParam<Process *> workload;
484#endif // FULL_SYSTEM
485
486 Param<int> clock;
487
488 Param<bool> defer_registration;
489 Param<int> width;
490 Param<bool> function_trace;
491 Param<Tick> function_trace_start;
492 Param<bool> simulate_stalls;
493
494END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
495
496BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
497
498 INIT_PARAM(max_insts_any_thread,
499 "terminate when any thread reaches this inst count"),
500 INIT_PARAM(max_insts_all_threads,
501 "terminate when all threads have reached this inst count"),
502 INIT_PARAM(max_loads_any_thread,
503 "terminate when any thread reaches this load count"),
504 INIT_PARAM(max_loads_all_threads,
505 "terminate when all threads have reached this load count"),
506 INIT_PARAM(mem, "memory"),
507
508#if FULL_SYSTEM
509 INIT_PARAM(itb, "Instruction TLB"),
510 INIT_PARAM(dtb, "Data TLB"),
511 INIT_PARAM(system, "system object"),
512 INIT_PARAM(cpu_id, "processor ID"),
513 INIT_PARAM(profile, ""),
514#else
515 INIT_PARAM(workload, "processes to run"),
516#endif // FULL_SYSTEM
517
518 INIT_PARAM(clock, "clock speed"),
519 INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
520 INIT_PARAM(width, "cpu width"),
521 INIT_PARAM(function_trace, "Enable function trace"),
522 INIT_PARAM(function_trace_start, "Cycle to start function trace"),
523 INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")
524
525END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
526
527
528CREATE_SIM_OBJECT(AtomicSimpleCPU)
529{
530 AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
531 params->name = getInstanceName();
532 params->numberOfThreads = 1;
533 params->max_insts_any_thread = max_insts_any_thread;
534 params->max_insts_all_threads = max_insts_all_threads;
535 params->max_loads_any_thread = max_loads_any_thread;
536 params->max_loads_all_threads = max_loads_all_threads;
537 params->deferRegistration = defer_registration;
538 params->clock = clock;
539 params->functionTrace = function_trace;
540 params->functionTraceStart = function_trace_start;
541 params->width = width;
542 params->simulate_stalls = simulate_stalls;
543 params->mem = mem;
544
545#if FULL_SYSTEM
546 params->itb = itb;
547 params->dtb = dtb;
548 params->system = system;
549 params->cpu_id = cpu_id;
550 params->profile = profile;
551#else
552 params->process = workload;
553#endif
554
555 AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
556 return cpu;
557}
558
559REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
560