atomic.cc: revision 2662:f24ae2d09e27 -> 2663:c82193ae8467
(lines removed in 2663 are prefixed with '-', lines added in 2663 with '+')
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/utility.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet_impl.hh"
#include "sim/builder.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description()
{
    return "AtomicSimpleCPU tick event";
}


void
AtomicSimpleCPU::init()
{
    // Create the memory ports and connect them up
    Port *mem_dport = mem->getPort("");
    dcachePort.setPeer(mem_dport);
    mem_dport->setPeer(&dcachePort);

    Port *mem_iport = mem->getPort("");
    icachePort.setPeer(mem_iport);
    mem_iport->setPeer(&icachePort);

    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

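// The atomic CPU drives all of its memory traffic itself through
// sendAtomic(), so apart from RangeChange status notifications none of
// these inbound port callbacks is ever expected; each one simply panics.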
bool
AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}


AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this)
{
    _status = Idle;

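    // Pre-allocate one Request/Packet pair for instruction fetches and one
    // each for data reads and writes; they are reused on every access via
    // reinitFromRequest().  dataStatic() binds the fetch and read packets'
    // data directly to the CPU's inst and dataReg fields.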
-    ifetch_req = new Request(true);
-    ifetch_req->setAsid(0);
-    // @todo fix me and get the real cpu iD!!!
-    ifetch_req->setCpuNum(0);
-    ifetch_req->setSize(sizeof(MachInst));
+    // @todo fix me and get the real cpu id & thread number!!!
+    ifetch_req = new Request();
    ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

-    data_read_req = new Request(true);
-    // @todo fix me and get the real cpu iD!!!
-    data_read_req->setCpuNum(0);
-    data_read_req->setAsid(0);
+    data_read_req = new Request();
    data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
                               Packet::Broadcast);
    data_read_pkt->dataStatic(&dataReg);

-    data_write_req = new Request(true);
-    // @todo fix me and get the real cpu iD!!!
-    data_write_req->setCpuNum(0);
-    data_write_req->setAsid(0);
+    data_write_req = new Request();
    data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
                                Packet::Broadcast);
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    BaseSimpleCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseSimpleCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

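// Sampler handshake: switchOut() stops this CPU (squashing any pending
// tick event) and acknowledges via signalSwitched(); takeOverFrom()
// resumes execution on whichever contexts the previous CPU left active.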
void
AtomicSimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == Running) {
        _status = SwitchedOut;

        tickEvent.squash();
    }
    sampler->signalSwitched();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
            break;
        }
    }
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    tickEvent.schedule(curTick + cycles(delay));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


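// Atomic load: set up data_read_req for this access, translate it, then
// complete the access in place with a single sendAtomic() call.  The
// latency returned by sendAtomic() is kept in dcache_latency so tick()
// can fold it into the stall calculation when simulate_stalls is set.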
template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
-    data_read_req->setVaddr(addr);
-    data_read_req->setSize(sizeof(T));
-    data_read_req->setFlags(flags);
-    data_read_req->setTime(curTick);
+    data_read_req->setVirt(0, addr, sizeof(T), flags, cpuXC->readPC());

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(data_read_req);

    // Now do the access.
    if (fault == NoFault) {
        data_read_pkt->reinitFromRequest();

        dcache_latency = dcachePort.sendAtomic(data_read_pkt);
        dcache_access = true;

        assert(data_read_pkt->result == Packet::Success);
        data = data_read_pkt->get<T>();

    }

    // This will need a new way to tell if it has a dcache attached.
    if (data_read_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

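// The floating-point and signed overloads below simply reinterpret the
// bits of the equally sized unsigned integer load.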
template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


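// Atomic store: mirrors read() above, except that the (endian-converted)
// data is bound to data_write_pkt before sending.  For LOCKED accesses
// (store-conditional), the success/failure result is passed back to the
// caller through *res.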
template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
-    data_write_req->setVaddr(addr);
-    data_write_req->setTime(curTick);
-    data_write_req->setSize(sizeof(T));
-    data_write_req->setFlags(flags);
+    data_write_req->setVirt(0, addr, sizeof(T), flags, cpuXC->readPC());

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(data_write_req);

    // Now do the access.
    if (fault == NoFault) {
        data = htog(data);
        data_write_pkt->reinitFromRequest();
        data_write_pkt->dataStatic(&data);

        dcache_latency = dcachePort.sendAtomic(data_write_pkt);
        dcache_access = true;

        assert(data_write_pkt->result == Packet::Success);

        if (res && data_write_req->getFlags() & LOCKED) {
            *res = data_write_req->getScResult();
        }
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (data_write_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


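// Main simulation loop: each tick fetches and executes up to 'width'
// instructions back to back, then reschedules itself 'latency' ticks later
// (one cycle plus any icache/dcache stall cycles when simulate_stalls is set).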
void
AtomicSimpleCPU::tick()
{
    Tick latency = cycles(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        checkForInterrupts();

-        ifetch_req->resetMin();
        Fault fault = setupFetchRequest(ifetch_req);

        if (fault == NoFault) {
            ifetch_pkt->reinitFromRequest();

            Tick icache_latency = icachePort.sendAtomic(ifetch_pkt);
            // ifetch_req is initialized to read the instruction directly
            // into the CPU object's inst field.

            dcache_access = false; // assume no dcache access
            preExecute();
            fault = curStaticInst->execute(this, traceData);
            postExecute();

            if (simulate_stalls) {
                // This calculation assumes that the icache and dcache
                // access latencies are always a multiple of the CPU's
                // cycle time.  If not, the next tick event may get
                // scheduled at a non-integer multiple of the CPU
                // cycle time.
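                // Worked example (hypothetical numbers): if cycles(1)
                // is 500 ticks and the icache access returned 1500
                // ticks with no dcache access, icache_stall is 1000,
                // dcache_stall is 0, and the next tick event lands
                // 1500 ticks (3 cycles) later in total.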
                Tick icache_stall = icache_latency - cycles(1);
                Tick dcache_stall =
                    dcache_access ? dcache_latency - cycles(1) : 0;
                latency += icache_stall + dcache_stall;
            }

        }

        advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    SimObjectParam<MemObject *> mem;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;
    Param<bool> simulate_stalls;

END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(mem, "memory"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace"),
    INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")

END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)


CREATE_SIM_OBJECT(AtomicSimpleCPU)
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->mem = mem;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
