atomic.cc revision 2657
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/utility.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet_impl.hh"
#include "sim/builder.hh"

using namespace std;
using namespace TheISA;

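// Tick event: scheduled on the main event queue at CPU_Tick_Pri and simply
// calls back into the CPU's tick() method each time it fires.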
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description()
{
    return "AtomicSimpleCPU tick event";
}


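// Hook the CPU's instruction and data ports up to the memory object and run
// base-class initialization.  In full-system mode each ExecContext also gets
// its architectural reset state via TheISA::initCPU().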
void
AtomicSimpleCPU::init()
{
    // Create memory ports (connect them up)
    Port *mem_dport = mem->getPort("");
    dcachePort.setPeer(mem_dport);
    mem_dport->setPeer(&dcachePort);

    Port *mem_iport = mem->getPort("");
    icachePort.setPeer(mem_iport);
    mem_iport->setPeer(&icachePort);

    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

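// The atomic CPU drives every memory access itself through sendAtomic(), so
// none of the inbound port callbacks below should ever fire; they all panic,
// except that benign RangeChange status updates are silently ignored.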
bool
AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}


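// The constructor pre-allocates one Request/Packet pair for instruction
// fetches, data reads, and data writes, and reuses them for every access.
// The fetch packet deposits bits directly into the CPU's inst field and the
// read packet into dataReg (both set up via dataStatic()).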
AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this)
{
    _status = Idle;

    ifetch_req = new Request(true);
    ifetch_req->setAsid(0);
    // @todo fix me and get the real CPU ID!!!
    ifetch_req->setCpuNum(0);
    ifetch_req->setSize(sizeof(MachInst));
    ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    data_read_req = new Request(true);
    // @todo fix me and get the real CPU ID!!!
    data_read_req->setCpuNum(0);
    data_read_req->setAsid(0);
    data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
                               Packet::Broadcast);
    data_read_pkt->dataStatic(&dataReg);

    data_write_req = new Request(true);
    // @todo fix me and get the real CPU ID!!!
    data_write_req->setCpuNum(0);
    data_write_req->setAsid(0);
    data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
                                Packet::Broadcast);
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

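// Checkpointing: save and restore the CPU's status enum and the tick event
// (under "<name>.tickEvent") on top of the BaseSimpleCPU state.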
void
AtomicSimpleCPU::serialize(ostream &os)
{
    BaseSimpleCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseSimpleCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

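// Called when a Sampler switches this CPU out: squash any pending tick
// event, mark the CPU as switched out, and notify the sampler.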
void
AtomicSimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == Running) {
        _status = SwitchedOut;

        tickEvent.squash();
    }
    sampler->signalSwitched();
}


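// Take over simulation state from another CPU; if any execution context is
// active, resume ticking immediately at curTick.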
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
            break;
        }
    }
}


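// Context activation/suspension: activateContext() schedules the tick event
// 'delay' cycles out and marks the CPU Running; suspendContext() deschedules
// it (if pending) and returns the CPU to Idle, updating notIdleFraction.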
void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    tickEvent.schedule(curTick + cycles(delay));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


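// Atomic load: fill in the reusable data_read_req (vaddr, size, flags),
// translate it through the execution context, then complete the access
// immediately with dcachePort.sendAtomic(); the returned latency is kept in
// dcache_complete and the loaded value is copied into 'data'.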
template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    data_read_req->setVaddr(addr);
    data_read_req->setSize(sizeof(T));
    data_read_req->setFlags(flags);
    data_read_req->setTime(curTick);

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(data_read_req);

    // Now do the access.
    if (fault == NoFault) {
        data_read_pkt->reset();
        data_read_pkt->reinitFromRequest();

        dcache_complete = dcachePort.sendAtomic(data_read_pkt);
        dcache_access = true;

        assert(data_read_pkt->result == Packet::Success);
        data = data_read_pkt->get<T>();
    }

    // This will need a new way to tell if it has a dcache attached.
    if (data_read_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

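// The floating-point and signed specializations below simply reinterpret
// their argument and forward to the unsigned-integer templates above.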
template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


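// Atomic store: mirrors read() above, but converts the data to guest
// endianness (htog) before sending it, and for LOCKED (store-conditional)
// requests returns the SC result through *res.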
template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    data_write_req->setVaddr(addr);
    data_write_req->setTime(curTick);
    data_write_req->setSize(sizeof(T));
    data_write_req->setFlags(flags);

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(data_write_req);

    // Now do the access.
    if (fault == NoFault) {
        data_write_pkt->reset();
        data = htog(data);
        data_write_pkt->dataStatic(&data);
        data_write_pkt->reinitFromRequest();

        dcache_complete = dcachePort.sendAtomic(data_write_pkt);
        dcache_access = true;

        assert(data_write_pkt->result == Packet::Success);

        if (res && data_write_req->getFlags() & LOCKED) {
            *res = data_write_req->getScResult();
        }
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (data_write_req->getFlags() & UNCACHEABLE)
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


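// Main execution loop: each tick executes up to 'width' instructions
// back-to-back.  Every iteration checks for interrupts, fetches over the
// icache port with sendAtomic(), executes the decoded StaticInst, and, when
// simulate_stalls is set, folds the atomic icache/dcache latencies (less the
// base cycle) into the delay before the next tick event.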
void
AtomicSimpleCPU::tick()
{
    Tick latency = cycles(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        checkForInterrupts();

        ifetch_req->resetMin();
        ifetch_pkt->reset();
        Fault fault = setupFetchPacket(ifetch_pkt);

        if (fault == NoFault) {
            Tick icache_complete = icachePort.sendAtomic(ifetch_pkt);
            // ifetch_pkt is set up (in the constructor) to read the
            // instruction directly into the CPU object's inst field.

            dcache_access = false; // assume no dcache access
            preExecute();
            fault = curStaticInst->execute(this, traceData);
            postExecute();

            if (simulate_stalls) {
                // This calculation assumes that the icache and dcache
                // access latencies are always a multiple of the CPU's
                // cycle time.  If not, the next tick event may get
                // scheduled at a non-integer multiple of the CPU
                // cycle time.
                Tick icache_stall = icache_complete - curTick - cycles(1);
                Tick dcache_stall =
                    dcache_access ? dcache_complete - curTick - cycles(1) : 0;
                latency += icache_stall + dcache_stall;
            }

        }

        advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    SimObjectParam<MemObject *> mem;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;
    Param<bool> simulate_stalls;

END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(mem, "memory"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace"),
    INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")

END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)


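// Build the CPU from the parsed configuration: copy each parameter into an
// AtomicSimpleCPU::Params struct and hand it to the constructor.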
CREATE_SIM_OBJECT(AtomicSimpleCPU)
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->mem = mem;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
