/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "arch/utility.hh"
#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "arch/tlb.hh"
#include "arch/stacktrace.hh"
#include "arch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/memory.hh"
#endif // FULL_SYSTEM

using namespace std;
using namespace TheISA;

SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}


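// Post-construction initialization.  In full-system mode, initialize the
// architected state (including the PC) of each of this CPU's execution
// contexts.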
void
SimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

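// Tick event handler: execute up to 'width' instructions back to back,
// stopping early if the CPU leaves the Running state.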
void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


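// Memory-system callbacks for the CPU's instruction and data ports.
// Timing-mode responses are handed to processResponse() and retries to
// processRetry(); atomic and functional callbacks are not expected and
// panic if they arrive.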
bool
SimpleCPU::CpuPort::recvTiming(Packet &pkt)
{
    cpu->processResponse(pkt);
    return true;
}

Tick
SimpleCPU::CpuPort::recvAtomic(Packet &pkt)
{
    panic("CPU doesn't expect callback!");
    return curTick;
}

void
SimpleCPU::CpuPort::recvFunctional(Packet &pkt)
{
    panic("CPU doesn't expect callback!");
}

void
SimpleCPU::CpuPort::recvStatusChange(Status status)
{
    cpu->recvStatusChange(status);
}

Packet *
SimpleCPU::CpuPort::recvRetry()
{
    return cpu->processRetry();
}

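// Construct the CPU: create and connect the icache and dcache ports,
// build the execution context, and (for the atomic/immediate memory
// models) preallocate the fetch, read, and write requests and packets.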
SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), icachePort(this),
      dcachePort(this), tickEvent(this, p->width), cpuXC(NULL)
{
    _status = Idle;

    // Create memory ports (connect them up)
    p->mem->addPort("DCACHE");
    dcachePort.setPeer(p->mem->getPort("DCACHE"));
    (p->mem->getPort("DCACHE"))->setPeer(&dcachePort);

    p->mem->addPort("ICACHE");
    icachePort.setPeer(p->mem->getPort("ICACHE"));
    (p->mem->getPort("ICACHE"))->setPeer(&icachePort);

#if FULL_SYSTEM
    cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);
#else
    cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process, /* asid */ 0,
                         &dcachePort);
#endif // !FULL_SYSTEM

    xcProxy = cpuXC->getProxy();

#if SIMPLE_CPU_MEM_ATOMIC || SIMPLE_CPU_MEM_IMMEDIATE
    ifetch_req = new CpuRequest;
    ifetch_req->asid = 0;
    ifetch_req->size = sizeof(MachInst);
    ifetch_pkt = new Packet;
    ifetch_pkt->cmd = Read;
    ifetch_pkt->data = (uint8_t *)&inst;
    ifetch_pkt->req = ifetch_req;
    ifetch_pkt->size = sizeof(MachInst);

    data_read_req = new CpuRequest;
    data_read_req->asid = 0;
    data_read_pkt = new Packet;
    data_read_pkt->cmd = Read;
    data_read_pkt->data = new uint8_t[8];
    data_read_pkt->req = data_read_req;

    data_write_req = new CpuRequest;
    data_write_req->asid = 0;
    data_write_pkt = new Packet;
    data_write_pkt->cmd = Write;
    data_write_pkt->req = data_write_req;
#endif

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xcProxy);
}

SimpleCPU::~SimpleCPU()
{
}

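// Switch this CPU out on behalf of the sampler.  If a dcache access is
// still outstanding, defer the switch until the response arrives;
// otherwise squash any pending tick event and signal the sampler now.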
void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheWaitResponse) {
        DPRINTF(Sampler,"Outstanding dcache access, waiting for completion\n");
        _status = DcacheWaitSwitch;
    }
    else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.squash();

        sampler->signalSwitched();
    }
}


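// Take over from another CPU (the counterpart of switchOut): inherit
// state from oldCPU and resume ticking if any execution context is
// active.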
void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


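// Register this CPU's statistics: instruction and memory-reference
// counts, idle/non-idle fractions, and cache stall/retry cycle counters.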
void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    icacheRetryCycles
        .name(name() + ".icache_retry_cycles")
        .desc("ICache total retry cycles")
        .prereq(icacheRetryCycles)
        ;

    dcacheRetryCycles
        .name(name() + ".dcache_retry_cycles")
        .desc("DCache total retry cycles")
        .prereq(dcacheRetryCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

void
SimpleCPU::resetStats()
{
    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

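// Checkpointing: save and restore the CPU status, the current
// instruction, the execution context, and the tick event.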
void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc", name()));
    cpuXC->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
}

void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    cpuXC->unserialize(cp, csprintf("%s.xc", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
change_thread_state(int thread_number, int activate, int priority)
{
}

Fault
SimpleCPU::copySrcTranslate(Addr src)
{
#if 0
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (src & PageMask) != ((src + blk_size) & PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(req);

    if (fault == NoFault) {
        cpuXC->copySrcAddr = src;
        cpuXC->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        assert(!fault->isAlignmentFault());

        cpuXC->copySrcAddr = 0;
        cpuXC->copySrcPhysAddr = 0;
    }
    return fault;
#else
    return NoFault;
#endif
}

Fault
SimpleCPU::copy(Addr dest)
{
#if 0
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(cpuXC->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (dest & PageMask) != ((dest + blk_size) & PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x. ", dest);
    }

    memReq->reset(dest & ~(blk_size -1), blk_size);
    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(req);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = cpuXC->copySrcPhysAddr;
        cpuXC->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        cpuXC->mem->write(memReq, data);
        if (dcacheInterface) {
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = cpuXC->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    }
    else
        assert(!fault->isAlignmentFault());

    return fault;
#else
    panic("copy not implemented");
    return NoFault;
#endif
}

// precise architected memory state accessor macros
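//
// read() translates the virtual address, issues the access through the
// dcache port under the configured memory model, and (for the immediate
// model, or when a timing response has already arrived) copies the
// returned bytes into 'data'.  The packet result is not yet turned into
// a Fault (see the @todo notes below).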
template <class T>
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    if (status() == DcacheWaitResponse || status() == DcacheWaitSwitch) {
//	Fault fault = xc->read(memReq,data);
        // Not sure what to check for no fault...
        if (data_read_pkt->result == Success) {
            memcpy(&data, data_read_pkt->data, sizeof(T));
        }

        if (traceData) {
            traceData->setAddr(addr);
        }

        // @todo: Figure out a way to create a Fault from the packet result.
        return NoFault;
    }

//    memReq->reset(addr, sizeof(T), flags);

#if SIMPLE_CPU_MEM_TIMING
    CpuRequest *data_read_req = new CpuRequest;
#endif

    data_read_req->vaddr = addr;
    data_read_req->size = sizeof(T);
    data_read_req->flags = flags;
    data_read_req->time = curTick;

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(data_read_req);

    // Now do the access.
    if (fault == NoFault) {
#if SIMPLE_CPU_MEM_TIMING
        data_read_pkt = new Packet;
        data_read_pkt->cmd = Read;
        data_read_pkt->req = data_read_req;
        data_read_pkt->data = new uint8_t[8];
#endif
        data_read_pkt->addr = data_read_req->paddr;
        data_read_pkt->size = sizeof(T);

        sendDcacheRequest(data_read_pkt);

#if SIMPLE_CPU_MEM_IMMEDIATE
        // Need to find a way to not duplicate code above.

        if (data_read_pkt->result == Success) {
            memcpy(&data, data_read_pkt->data, sizeof(T));
        }

        if (traceData) {
            traceData->setAddr(addr);
        }

        // @todo: Figure out a way to create a Fault from the packet result.
        return NoFault;
#endif
    }
/*
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = cpuXC->read(memReq, data);

        }
    } else if(fault == NoFault) {
        // do functional access
        fault = cpuXC->read(memReq, data);

    }
*/
    // This will need a new way to tell if it has a dcache attached.
    if (data_read_req->flags & UNCACHEABLE)
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


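// write() translates the virtual address and sends the store data out
// through the dcache port; the timing model allocates a fresh packet per
// access, while the other models reuse the preallocated write packet.
// If 'res' is non-NULL it receives the packet result.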
template <class T>
Fault
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    data_write_req->vaddr = addr;
    data_write_req->time = curTick;
    data_write_req->size = sizeof(T);
    data_write_req->flags = flags;

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(data_write_req);
    // Now do the access.
    if (fault == NoFault) {
#if SIMPLE_CPU_MEM_TIMING
        data_write_pkt = new Packet;
        data_write_pkt->cmd = Write;
        data_write_pkt->req = data_write_req;
        data_write_pkt->data = new uint8_t[64];
        memcpy(data_write_pkt->data, &data, sizeof(T));
#else
        data_write_pkt->data = (uint8_t *)&data;
#endif
        data_write_pkt->addr = data_write_req->paddr;
        data_write_pkt->size = sizeof(T);

        sendDcacheRequest(data_write_pkt);
    }

/*
    // do functional access
    if (fault == NoFault)
        fault = cpuXC->write(memReq, data);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data,(uint8_t *)&data,memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }
*/
    if (res && (fault == NoFault))
        *res = data_write_pkt->result;

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (data_write_req->flags & UNCACHEABLE)
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xcProxy, addr);
}
#endif // FULL_SYSTEM

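// Issue an instruction fetch to the icache port.  Under the timing model
// the CPU stalls until a response (or retry) arrives; under the atomic
// model the returned latency is charged as icache stall cycles; under
// the immediate model the access completes in place.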
void
SimpleCPU::sendIcacheRequest(Packet *pkt)
{
    assert(!tickEvent.scheduled());
#if SIMPLE_CPU_MEM_TIMING
    retry_pkt = pkt;
    bool success = icachePort.sendTiming(*pkt);

    unscheduleTickEvent();

    lastIcacheStall = curTick;

    if (!success) {
        // Need to wait for retry
        _status = IcacheRetry;
    } else {
        // Need to wait for cache to respond
        _status = IcacheWaitResponse;
    }
#elif SIMPLE_CPU_MEM_ATOMIC
    Tick latency = icachePort.sendAtomic(*pkt);

    unscheduleTickEvent();
    scheduleTickEvent(latency);

    // Note that Icache miss cycles will be incorrect.  Unless
    // we check the status of the packet sent (is this valid?),
    // we won't know if the latency is a hit or a miss.
    icacheStallCycles += latency;

    _status = IcacheAccessComplete;
#elif SIMPLE_CPU_MEM_IMMEDIATE
    icachePort.sendAtomic(*pkt);
#else
#error "SimpleCPU has no mem model set"
#endif
}

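// Issue a data access to the dcache port, with the same handling of the
// timing, atomic, and immediate memory models as the icache path above.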
void
SimpleCPU::sendDcacheRequest(Packet *pkt)
{
    assert(!tickEvent.scheduled());
#if SIMPLE_CPU_MEM_TIMING
    unscheduleTickEvent();

    retry_pkt = pkt;
    bool success = dcachePort.sendTiming(*pkt);

    lastDcacheStall = curTick;

    if (!success) {
        _status = DcacheRetry;
    } else {
        _status = DcacheWaitResponse;
    }
#elif SIMPLE_CPU_MEM_ATOMIC
    unscheduleTickEvent();

    Tick latency = dcachePort.sendAtomic(*pkt);

    scheduleTickEvent(latency);

    // Note that Dcache miss cycles will be incorrect.  Unless
    // we check the status of the packet sent (is this valid?),
    // we won't know if the latency is a hit or a miss.
    dcacheStallCycles += latency;
#elif SIMPLE_CPU_MEM_IMMEDIATE
    dcachePort.sendAtomic(*pkt);
#else
#error "SimpleCPU has no mem model set"
#endif
}

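// Handle a timing-mode response from the memory system: complete the
// outstanding fetch or data access, update stall statistics, and
// reschedule the tick event (or finish a deferred switch-out).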
void
SimpleCPU::processResponse(Packet &response)
{
    assert(SIMPLE_CPU_MEM_TIMING);

    // For what things is the CPU the consumer of the packet it sent
    // out?  This may create a memory leak if that's the case and it's
    // expected of the SimpleCPU to delete its own packet.
    Packet *pkt = &response;

    switch (status()) {
      case IcacheWaitResponse:
        icacheStallCycles += curTick - lastIcacheStall;

        _status = IcacheAccessComplete;
        scheduleTickEvent(1);

        // Copy the icache data into the instruction itself.
        memcpy(&inst, pkt->data, sizeof(inst));

        delete pkt;
        break;
      case DcacheWaitResponse:
        if (pkt->cmd == Read) {
            curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        }

        delete pkt;

        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheWaitSwitch:
        if (pkt->cmd == Read) {
            curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        }

        delete pkt;

        _status = SwitchedOut;
        sampler->signalSwitched();
        // The packet was already deleted above; return here rather than
        // falling through into the SwitchedOut case, which would delete
        // it a second time.
        return;
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        delete pkt;

        return;
      default:
        panic("SimpleCPU::processResponse: bad state");
        break;
    }
}

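// The memory system is ready to accept the previously rejected timing
// request: account the retry cycles and hand back the saved packet so
// the port can resend it.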
Packet *
SimpleCPU::processRetry()
{
#if SIMPLE_CPU_MEM_TIMING
    switch(status()) {
      case IcacheRetry:
        icacheRetryCycles += curTick - lastIcacheStall;
        return retry_pkt;
        break;
      case DcacheRetry:
        dcacheRetryCycles += curTick - lastDcacheStall;
        return retry_pkt;
        break;
      default:
        panic("SimpleCPU::processRetry: bad state");
        break;
    }
#else
    panic("shouldn't be here");
#endif
}

#if FULL_SYSTEM
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (cpuXC->status() == ExecContext::Suspended) {
        DPRINTF(IPI,"Suspended Processor awoke\n");
        cpuXC->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
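// Each tick: check for interrupts (full-system only), fetch and decode
// one instruction, execute it, advance the PC, and reschedule the tick
// event as long as the CPU is still Running.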
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault fault = NoFault;

#if FULL_SYSTEM
    if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() &&
        status() != IcacheAccessComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;

        if (cpuXC->readMiscReg(IPR_SIRR)) {
            for (int i = INTLEVEL_SOFTWARE_MIN;
                 i < INTLEVEL_SOFTWARE_MAX; i++) {
                if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = cpuXC->cpu->intr_status();
        for (int i = INTLEVEL_EXTERNAL_MIN;
            i < INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (cpuXC->readMiscReg(IPR_ASTRR))
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
            cpuXC->setMiscReg(IPR_ISR, summary);
            cpuXC->setMiscReg(IPR_INTID, ipl);

            Fault(new InterruptFault)->invoke(xcProxy);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    cpuXC->setIntReg(ZeroReg, 0);
#if THE_ISA == ALPHA_ISA
    cpuXC->setFloatRegDouble(ZeroReg, 0.0);
#endif // ALPHA_ISA

    if (status() == IcacheAccessComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    } else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc)	((pc) & 1) ? PHYSICAL : 0
#else
#define IFETCH_FLAGS(pc)	0
#endif

#if SIMPLE_CPU_MEM_TIMING
        CpuRequest *ifetch_req = new CpuRequest();
        ifetch_req->size = sizeof(MachInst);
#endif

        ifetch_req->vaddr = cpuXC->readPC() & ~3;
        ifetch_req->time = curTick;

/*	memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t),
                     IFETCH_FLAGS(xc->regs.pc));
*/

        fault = cpuXC->translateInstReq(ifetch_req);

        if (fault == NoFault) {
#if SIMPLE_CPU_MEM_TIMING
            Packet *ifetch_pkt = new Packet;
            ifetch_pkt->cmd = Read;
            ifetch_pkt->data = (uint8_t *)&inst;
            ifetch_pkt->req = ifetch_req;
            ifetch_pkt->size = sizeof(MachInst);
#endif
            ifetch_pkt->addr = ifetch_req->paddr;

            sendIcacheRequest(ifetch_pkt);
#if SIMPLE_CPU_MEM_TIMING || SIMPLE_CPU_MEM_ATOMIC
            return;
#endif
/*
        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss.  We really should add first-class support for this
            // at some point.
                if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
*/
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC()));

        traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
                                         cpuXC->readPC());

#if FULL_SYSTEM
        cpuXC->setInst(inst);
#endif // FULL_SYSTEM

        cpuXC->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (system->kernelBinning->fnbin) {
            assert(kernelStats);
            system->kernelBinning->execute(xcProxy, inst);
        }

        if (cpuXC->profile) {
            bool usermode =
                (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
            cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
            ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
            if (node)
                cpuXC->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later
        if (traceData && (status() != DcacheWaitResponse)) {
            traceData->finalize();
        }

        traceFunctions(cpuXC->readPC());

    }	// if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        fault->invoke(xcProxy);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC %08p", fault, cpuXC->readPC());
#endif // FULL_SYSTEM
    }
    else {
#if THE_ISA != MIPS_ISA
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
#else
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextNPC());
        cpuXC->setNextNPC(cpuXC->readNextNPC() + sizeof(MachInst));
#endif

    }

#if FULL_SYSTEM
    Addr oldpc;
    do {
        oldpc = cpuXC->readPC();
        system->pcEventQueue.service(xcProxy);
    } while (oldpc != cpuXC->readPC());
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheWaitResponse);

    if (status() == Running && !tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));
}

////////////////////////////////////////////////////////////////////////
//
//  SimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Memory *> mem;
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->mem = mem;
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)
