// atomic.cc, revision 2641
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/utility.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet_impl.hh"
#include "sim/builder.hh"

using namespace std;
using namespace TheISA;

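// The AtomicSimpleCPU performs all of its memory accesses with blocking,
// atomic-mode port calls (sendAtomic), so each instruction and its memory
// accesses complete entirely within a single call to tick().  The tick
// event below re-invokes tick() once per CPU cycle while the CPU is
// running.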
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description()
{
    return "AtomicSimpleCPU tick event";
}

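// init() wires the CPU's instruction and data ports to the memory object
// and, in full-system mode, initializes each execution context's
// architectural state (including the PC) via TheISA::initCPU().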
void
AtomicSimpleCPU::init()
{
    // Create memory ports and connect them up
    Port *mem_dport = mem->getPort("");
    dcachePort.setPeer(mem_dport);
    mem_dport->setPeer(&dcachePort);

    Port *mem_iport = mem->getPort("");
    icachePort.setPeer(mem_iport);
    mem_iport->setPeer(&icachePort);

    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

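// The atomic CPU is always the initiator on its ports; it never expects
// inbound requests or retries from the memory system.  Any of the
// following callbacks firing indicates a misconfigured memory hierarchy,
// so they all panic (a RangeChange status update is the one benign case).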
bool
AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

Packet *
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
    return NULL;
}

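// The constructor pre-allocates one Request/Packet pair for instruction
// fetches, one for data reads, and one for data writes.  Because atomic
// accesses complete inline, these objects can be reused for every access
// instead of being allocated per instruction.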
AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this)
{
    _status = Idle;

    ifetch_req = new Request(true);
    ifetch_req->setAsid(0);
    // @todo fix me and get the real CPU ID!
    ifetch_req->setCpuNum(0);
    ifetch_req->setSize(sizeof(MachInst));
    ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    data_read_req = new Request(true);
    // @todo fix me and get the real CPU ID!
    data_read_req->setCpuNum(0);
    data_read_req->setAsid(0);
    data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
                               Packet::Broadcast);
    data_read_pkt->dataStatic(&dataReg);

    data_write_req = new Request(true);
    // @todo fix me and get the real CPU ID!
    data_write_req->setCpuNum(0);
    data_write_req->setAsid(0);
    data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
                                Packet::Broadcast);
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    BaseSimpleCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseSimpleCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == Running) {
        _status = SwitchedOut;

        tickEvent.squash();
    }
    sampler->signalSwitched();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
            break;
        }
    }
}

void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    tickEvent.schedule(curTick + cycles(delay));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}

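// read<T>() handles an atomic-mode load: it fills in the pre-allocated
// data_read_req, translates the virtual address, and, on a successful
// translation, issues the access with dcachePort.sendAtomic().  The
// completion time the port returns is remembered in dcache_complete so
// tick() can account for the stall when simulate_stalls is enabled.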
template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    data_read_req->setVaddr(addr);
    data_read_req->setSize(sizeof(T));
    data_read_req->setFlags(flags);
    data_read_req->setTime(curTick);

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(data_read_req);

    // Now do the access.
    if (fault == NoFault) {
        data_read_pkt->reset();
        data_read_pkt->reinitFromRequest();

        dcache_complete = dcachePort.sendAtomic(data_read_pkt);
        dcache_access = true;

        assert(data_read_pkt->result == Packet::Success);
        data = data_read_pkt->get<T>();
    }

    // This will need a new way to tell if it has a dcache attached.
    if (data_read_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Read");

    return fault;
}

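// Explicit instantiations are needed because the read template is defined
// in this .cc file rather than in a header.  The double/float/int32_t
// specializations below simply forward to the unsigned integer version of
// the same width.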
#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

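// write<T>() mirrors read<T>(): it fills in data_write_req, translates the
// address, converts the data to guest endianness with htog(), and sends
// the packet atomically.  For LOCKED (store-conditional) accesses the
// success/failure result is returned to the caller through *res.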
template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    data_write_req->setVaddr(addr);
    data_write_req->setTime(curTick);
    data_write_req->setSize(sizeof(T));
    data_write_req->setFlags(flags);

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(data_write_req);

    // Now do the access.
    if (fault == NoFault) {
        data_write_pkt->reset();
        data = htog(data);
        data_write_pkt->dataStatic(&data);
        data_write_pkt->reinitFromRequest();

        dcache_complete = dcachePort.sendAtomic(data_write_pkt);
        dcache_access = true;

        assert(data_write_pkt->result == Packet::Success);

        if (res && (data_write_req->getFlags() & LOCKED)) {
            *res = data_write_req->getScResult();
        }
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (data_write_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

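// tick() executes up to 'width' instructions per invocation.  When
// simulate_stalls is set, the completion times returned by sendAtomic()
// are converted into extra latency before the next tick is scheduled.
// Worked example (assuming width == 1 and the baseline cost of cycles(1)):
// if the icache access reports completion 3 cycles after curTick and there
// is no dcache access, icache_stall = 3 - 1 = 2 cycles, so the next tick
// is scheduled 1 + 2 = 3 cycles later.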
void
AtomicSimpleCPU::tick()
{
    Tick latency = cycles(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        checkForInterrupts();

        ifetch_req->resetMin();
        ifetch_pkt->reset();
        Fault fault = setupFetchPacket(ifetch_pkt);

        if (fault == NoFault) {
            // ifetch_req is initialized to read the instruction directly
            // into the CPU object's inst field.
            Tick icache_complete = icachePort.sendAtomic(ifetch_pkt);

            dcache_access = false; // assume no dcache access
            preExecute();
            fault = curStaticInst->execute(this, traceData);
            postExecute();

            if (traceData) {
                traceData->finalize();
            }

            if (simulate_stalls) {
                // This calculation assumes that the icache and dcache
                // access latencies are always a multiple of the CPU's
                // cycle time.  If not, the next tick event may get
                // scheduled at a non-integer multiple of the CPU
                // cycle time.
                Tick icache_stall = icache_complete - curTick - cycles(1);
                Tick dcache_stall =
                    dcache_access ? dcache_complete - curTick - cycles(1) : 0;
                latency += icache_stall + dcache_stall;
            }
        }

        advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}

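// The remaining boilerplate uses the sim/builder.hh macros to declare and
// initialize this SimObject's construction-time parameters, so that an
// AtomicSimpleCPU can be created by name from the simulator's
// configuration system.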

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    SimObjectParam<MemObject *> mem;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;
    Param<bool> simulate_stalls;

END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(mem, "memory"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace"),
    INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")

END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)


CREATE_SIM_OBJECT(AtomicSimpleCPU)
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->mem = mem;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)