atomic.cc (2657:b119b774656b -> 2662:f24ae2d09e27)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "arch/utility.hh"
30#include "cpu/exetrace.hh"
31#include "cpu/simple/atomic.hh"
32#include "mem/packet_impl.hh"
33#include "sim/builder.hh"
34
35using namespace std;
36using namespace TheISA;
37
38AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
39 : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
40{
41}
42
43
44void
45AtomicSimpleCPU::TickEvent::process()
46{
47 cpu->tick();
48}
49
50const char *
51AtomicSimpleCPU::TickEvent::description()
52{
53 return "AtomicSimpleCPU tick event";
54}
55
56
57void
58AtomicSimpleCPU::init()
59{
60 // Create Memory Ports (connect them up)
61 Port *mem_dport = mem->getPort("");
62 dcachePort.setPeer(mem_dport);
63 mem_dport->setPeer(&dcachePort);
64
65 Port *mem_iport = mem->getPort("");
66 icachePort.setPeer(mem_iport);
67 mem_iport->setPeer(&icachePort);
68
69 BaseCPU::init();
70#if FULL_SYSTEM
71 for (int i = 0; i < execContexts.size(); ++i) {
72 ExecContext *xc = execContexts[i];
73
74 // initialize CPU, including PC
75 TheISA::initCPU(xc, xc->readCpuId());
76 }
77#endif
78}
79
80bool
81AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
82{
83 panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
84 return true;
85}
86
87Tick
88AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
89{
90 panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
91 return curTick;
92}
93
94void
95AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
96{
97 panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
98}
99
100void
101AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
102{
103 if (status == RangeChange)
104 return;
105
106 panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
107}
108
109void
110AtomicSimpleCPU::CpuPort::recvRetry()
111{
112 panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
113}
114
115
116AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
117 : BaseSimpleCPU(p), tickEvent(this),
118 width(p->width), simulate_stalls(p->simulate_stalls),
119 icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this)
120{
121 _status = Idle;
122
123 ifetch_req = new Request(true);
124 ifetch_req->setAsid(0);
125 // @todo fix me and get the real CPU ID!
126 ifetch_req->setCpuNum(0);
127 ifetch_req->setSize(sizeof(MachInst));
128 ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
129 ifetch_pkt->dataStatic(&inst);
130
131 data_read_req = new Request(true);
132 // @todo fix me and get the real CPU ID!
133 data_read_req->setCpuNum(0);
134 data_read_req->setAsid(0);
135 data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
136 Packet::Broadcast);
137 data_read_pkt->dataStatic(&dataReg);
138
139 data_write_req = new Request(true);
140 // @todo fix me and get the real CPU ID!
141 data_write_req->setCpuNum(0);
142 data_write_req->setAsid(0);
143 data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
144 Packet::Broadcast);
145}
146
147
148AtomicSimpleCPU::~AtomicSimpleCPU()
149{
150}
151
152void
153AtomicSimpleCPU::serialize(ostream &os)
154{
155 BaseSimpleCPU::serialize(os);
156 SERIALIZE_ENUM(_status);
157 nameOut(os, csprintf("%s.tickEvent", name()));
158 tickEvent.serialize(os);
159}
160
161void
162AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
163{
164 BaseSimpleCPU::unserialize(cp, section);
165 UNSERIALIZE_ENUM(_status);
166 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
167}
168
169void
170AtomicSimpleCPU::switchOut(Sampler *s)
171{
172 sampler = s;
173 if (status() == Running) {
174 _status = SwitchedOut;
175
176 tickEvent.squash();
177 }
178 sampler->signalSwitched();
179}
180
181
182void
183AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
184{
185 BaseCPU::takeOverFrom(oldCPU);
186
187 assert(!tickEvent.scheduled());
188
189 // if any of this CPU's ExecContexts are active, mark the CPU as
190 // running and schedule its tick event.
191 for (int i = 0; i < execContexts.size(); ++i) {
192 ExecContext *xc = execContexts[i];
193 if (xc->status() == ExecContext::Active && _status != Running) {
194 _status = Running;
195 tickEvent.schedule(curTick);
196 break;
197 }
198 }
199}
200
201
202void
203AtomicSimpleCPU::activateContext(int thread_num, int delay)
204{
205 assert(thread_num == 0);
206 assert(cpuXC);
207
208 assert(_status == Idle);
209 assert(!tickEvent.scheduled());
210
211 notIdleFraction++;
212 tickEvent.schedule(curTick + cycles(delay));
213 _status = Running;
214}
215
216
217void
218AtomicSimpleCPU::suspendContext(int thread_num)
219{
220 assert(thread_num == 0);
221 assert(cpuXC);
222
223 assert(_status == Running);
224
225 // tick event may not be scheduled if this gets called from inside
226 // an instruction's execution, e.g. "quiesce"
227 if (tickEvent.scheduled())
228 tickEvent.deschedule();
229
230 notIdleFraction--;
231 _status = Idle;
232}
233
234
235template <class T>
236Fault
237AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
238{
239 data_read_req->setVaddr(addr);
240 data_read_req->setSize(sizeof(T));
241 data_read_req->setFlags(flags);
242 data_read_req->setTime(curTick);
243
244 if (traceData) {
245 traceData->setAddr(addr);
246 }
247
248 // translate to physical address
249 Fault fault = cpuXC->translateDataReadReq(data_read_req);
250
251 // Now do the access.
252 if (fault == NoFault) {
2657:b119b774656b:
253 data_read_pkt->reset();
254 data_read_pkt->reinitFromRequest();
255
256 dcache_complete = dcachePort.sendAtomic(data_read_pkt);
2662:f24ae2d09e27:
253 data_read_pkt->reinitFromRequest();
254
255 dcache_latency = dcachePort.sendAtomic(data_read_pkt);
256 dcache_access = true;
257
258 assert(data_read_pkt->result == Packet::Success);
259 data = data_read_pkt->get<T>();
260
261 }
262
263 // This will need a new way to tell if it has a dcache attached.
264 if (data_read_req->getFlags() & UNCACHEABLE)
265 recordEvent("Uncached Read");
266
267 return fault;
268}
269
270#ifndef DOXYGEN_SHOULD_SKIP_THIS
271
272template
273Fault
274AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
275
276template
277Fault
278AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
279
280template
281Fault
282AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
283
284template
285Fault
286AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
287
288#endif //DOXYGEN_SHOULD_SKIP_THIS
289
290template<>
291Fault
292AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
293{
294 return read(addr, *(uint64_t*)&data, flags);
295}
296
297template<>
298Fault
299AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
300{
301 return read(addr, *(uint32_t*)&data, flags);
302}
303
304
305template<>
306Fault
307AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
308{
309 return read(addr, (uint32_t&)data, flags);
310}
311
312
313template <class T>
314Fault
315AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
316{
317 data_write_req->setVaddr(addr);
318 data_write_req->setTime(curTick);
319 data_write_req->setSize(sizeof(T));
320 data_write_req->setFlags(flags);
321
322 if (traceData) {
323 traceData->setAddr(addr);
324 }
325
326 // translate to physical address
327 Fault fault = cpuXC->translateDataWriteReq(data_write_req);
328
329 // Now do the access.
330 if (fault == NoFault) {
2657:b119b774656b:
332 data_write_pkt->reset();
333 data = htog(data);
334 data_write_pkt->dataStatic(&data);
335 data_write_pkt->reinitFromRequest();
336
337 dcache_complete = dcachePort.sendAtomic(data_write_pkt);
2662:f24ae2d09e27:
331 data = htog(data);
332 data_write_pkt->reinitFromRequest();
333 data_write_pkt->dataStatic(&data);
334
335 dcache_latency = dcachePort.sendAtomic(data_write_pkt);
336 dcache_access = true;
337
338 assert(data_write_pkt->result == Packet::Success);
339
340 if (res && data_write_req->getFlags() & LOCKED) {
341 *res = data_write_req->getScResult();
342 }
343 }
344
345 // This will need a new way to tell if it's hooked up to a cache or not.
346 if (data_write_req->getFlags() & UNCACHEABLE)
347 recordEvent("Uncached Write");
348
349 // If the write needs to have a fault on the access, consider calling
350 // changeStatus() and changing it to "bad addr write" or something.
351 return fault;
352}
353
354
355#ifndef DOXYGEN_SHOULD_SKIP_THIS
356template
357Fault
358AtomicSimpleCPU::write(uint64_t data, Addr addr,
359 unsigned flags, uint64_t *res);
360
361template
362Fault
363AtomicSimpleCPU::write(uint32_t data, Addr addr,
364 unsigned flags, uint64_t *res);
365
366template
367Fault
368AtomicSimpleCPU::write(uint16_t data, Addr addr,
369 unsigned flags, uint64_t *res);
370
371template
372Fault
373AtomicSimpleCPU::write(uint8_t data, Addr addr,
374 unsigned flags, uint64_t *res);
375
376#endif //DOXYGEN_SHOULD_SKIP_THIS
377
378template<>
379Fault
380AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
381{
382 return write(*(uint64_t*)&data, addr, flags, res);
383}
384
385template<>
386Fault
387AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
388{
389 return write(*(uint32_t*)&data, addr, flags, res);
390}
391
392
393template<>
394Fault
395AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
396{
397 return write((uint32_t)data, addr, flags, res);
398}
399
400
401void
402AtomicSimpleCPU::tick()
403{
404 Tick latency = cycles(1); // instruction takes one cycle by default
405
406 for (int i = 0; i < width; ++i) {
407 numCycles++;
408
409 checkForInterrupts();
410
411 ifetch_req->resetMin();
2657:b119b774656b:
414 ifetch_pkt->reset();
415 Fault fault = setupFetchPacket(ifetch_pkt);
416
417 if (fault == NoFault) {
418 Tick icache_complete = icachePort.sendAtomic(ifetch_pkt);
2662:f24ae2d09e27:
412 Fault fault = setupFetchRequest(ifetch_req);
413
414 if (fault == NoFault) {
415 ifetch_pkt->reinitFromRequest();
416
417 Tick icache_latency = icachePort.sendAtomic(ifetch_pkt);
418 // ifetch_req is initialized to read the instruction directly
419 // into the CPU object's inst field.
420
421 dcache_access = false; // assume no dcache access
422 preExecute();
423 fault = curStaticInst->execute(this, traceData);
424 postExecute();
425
426 if (simulate_stalls) {
427 // This calculation assumes that the icache and dcache
428 // access latencies are always a multiple of the CPU's
429 // cycle time. If not, the next tick event may get
430 // scheduled at a non-integer multiple of the CPU
431 // cycle time.
2657:b119b774656b:
433 Tick icache_stall = icache_complete - curTick - cycles(1);
434 Tick dcache_stall =
435 dcache_access ? dcache_complete - curTick - cycles(1) : 0;
2662:f24ae2d09e27:
432 Tick icache_stall = icache_latency - cycles(1);
433 Tick dcache_stall =
434 dcache_access ? dcache_latency - cycles(1) : 0;
435 latency += icache_stall + dcache_stall;
436 }
437
438 }
439
440 advancePC(fault);
441 }
442
443 if (_status != Idle)
444 tickEvent.schedule(curTick + latency);
445}
446
447
448////////////////////////////////////////////////////////////////////////
449//
450// AtomicSimpleCPU Simulation Object
451//
452BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
453
454 Param<Counter> max_insts_any_thread;
455 Param<Counter> max_insts_all_threads;
456 Param<Counter> max_loads_any_thread;
457 Param<Counter> max_loads_all_threads;
458 SimObjectParam<MemObject *> mem;
459
460#if FULL_SYSTEM
461 SimObjectParam<AlphaITB *> itb;
462 SimObjectParam<AlphaDTB *> dtb;
463 SimObjectParam<System *> system;
464 Param<int> cpu_id;
465 Param<Tick> profile;
466#else
467 SimObjectParam<Process *> workload;
468#endif // FULL_SYSTEM
469
470 Param<int> clock;
471
472 Param<bool> defer_registration;
473 Param<int> width;
474 Param<bool> function_trace;
475 Param<Tick> function_trace_start;
476 Param<bool> simulate_stalls;
477
478END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
479
480BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
481
482 INIT_PARAM(max_insts_any_thread,
483 "terminate when any thread reaches this inst count"),
484 INIT_PARAM(max_insts_all_threads,
485 "terminate when all threads have reached this inst count"),
486 INIT_PARAM(max_loads_any_thread,
487 "terminate when any thread reaches this load count"),
488 INIT_PARAM(max_loads_all_threads,
489 "terminate when all threads have reached this load count"),
490 INIT_PARAM(mem, "memory"),
491
492#if FULL_SYSTEM
493 INIT_PARAM(itb, "Instruction TLB"),
494 INIT_PARAM(dtb, "Data TLB"),
495 INIT_PARAM(system, "system object"),
496 INIT_PARAM(cpu_id, "processor ID"),
497 INIT_PARAM(profile, ""),
498#else
499 INIT_PARAM(workload, "processes to run"),
500#endif // FULL_SYSTEM
501
502 INIT_PARAM(clock, "clock speed"),
503 INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
504 INIT_PARAM(width, "cpu width"),
505 INIT_PARAM(function_trace, "Enable function trace"),
506 INIT_PARAM(function_trace_start, "Cycle to start function trace"),
507 INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")
508
509END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
510
511
512CREATE_SIM_OBJECT(AtomicSimpleCPU)
513{
514 AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
515 params->name = getInstanceName();
516 params->numberOfThreads = 1;
517 params->max_insts_any_thread = max_insts_any_thread;
518 params->max_insts_all_threads = max_insts_all_threads;
519 params->max_loads_any_thread = max_loads_any_thread;
520 params->max_loads_all_threads = max_loads_all_threads;
521 params->deferRegistration = defer_registration;
522 params->clock = clock;
523 params->functionTrace = function_trace;
524 params->functionTraceStart = function_trace_start;
525 params->width = width;
526 params->simulate_stalls = simulate_stalls;
527 params->mem = mem;
528
529#if FULL_SYSTEM
530 params->itb = itb;
531 params->dtb = dtb;
532 params->system = system;
533 params->cpu_id = cpu_id;
534 params->profile = profile;
535#else
536 params->process = workload;
537#endif
538
539 AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
540 return cpu;
541}
542
543REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
544
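The stall hunks above are the heart of the change: in 2662, sendAtomic() evidently returns a latency rather than an absolute completion tick, so the stall terms no longer subtract curTick. The sketch below is not part of atomic.cc; it is a minimal, standalone illustration of that arithmetic in tick(), using an assumed 500-tick CPU cycle and hypothetical cache latencies in place of cycles(1) and the values sendAtomic() would return.

// Illustrative sketch only -- mirrors the 2662 stall arithmetic in
// AtomicSimpleCPU::tick() with made-up numbers.
#include <cassert>
#include <cstdint>
#include <iostream>

typedef int64_t Tick;

static const Tick cpu_cycle = 500;   // stand-in for cycles(1)

// One base cycle per instruction, plus any cycles the icache/dcache
// accesses spent beyond that single cycle (the "stall" terms).
Tick instruction_latency(Tick icache_latency, bool dcache_access,
                         Tick dcache_latency)
{
    Tick latency = cpu_cycle;                                  // default: 1 cycle
    Tick icache_stall = icache_latency - cpu_cycle;
    Tick dcache_stall = dcache_access ? dcache_latency - cpu_cycle : 0;
    return latency + icache_stall + dcache_stall;
}

int main()
{
    // Hypothetical load: fetch took 3 cycles, data access took 5 cycles.
    Tick next = instruction_latency(3 * cpu_cycle, true, 5 * cpu_cycle);
    assert(next == 7 * cpu_cycle);   // 1 base + 2 fetch stall + 4 data stall
    std::cout << "next tick scheduled " << next << " ticks later\n";
    return 0;
}

With simulate_stalls disabled, tick() keeps latency at cycles(1), so the CPU retires width instructions per cycle regardless of the latencies the atomic accesses report.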