/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <csignal>
#include <cstring>
#include <ostream>
#include <sstream>

#include "arch/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Checkpoint.hh"
#include "debug/Kvm.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmRun.hh"
#include "params/BaseKvmCPU.hh"
#include "sim/process.hh"
#include "sim/system.hh"

/* Used by some KVM macros */
#define PAGE_SIZE pageSize

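/* Set from the KVM_TIMER_SIGNAL handler below when the run timer fires
 * while the vCPU is executing inside KVM. kvmRun() clears this flag
 * before arming the timer and checks it after KVM_RUN returns to tell
 * timer-induced exits apart from other interrupted runs. */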
volatile bool timerOverflowed = false;

static void
onTimerOverflow(int signo, siginfo_t *si, void *data)
{
    timerOverflowed = true;
}

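/* The constructor selects between two run-timer implementations for
 * limiting the time spent in KVM: a PerfKvmTimer driven by the hardware
 * cycle counter (when usePerfOverflow is set) and a POSIX timer based on
 * CLOCK_MONOTONIC otherwise. Both deliver KVM_TIMER_SIGNAL, which
 * interrupts KVM_RUN and returns control to gem5. */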
BaseKvmCPU::BaseKvmCPU(BaseKvmCPUParams *params)
    : BaseCPU(params),
      vm(*params->kvmVM),
      _status(Idle),
      dataPort(name() + ".dcache_port", this),
      instPort(name() + ".icache_port", this),
      threadContextDirty(true),
      kvmStateDirty(false),
      vcpuID(vm.allocVCPUID()), vcpuFD(-1), vcpuMMapSize(0),
      _kvmRun(NULL), mmioRing(NULL),
      pageSize(sysconf(_SC_PAGE_SIZE)),
      tickEvent(*this),
      perfControlledByTimer(params->usePerfOverflow),
      hostFactor(params->hostFactor)
{
    if (pageSize == -1)
        panic("KVM: Failed to determine host page size (%i)\n",
              errno);

    thread = new SimpleThread(this, 0, params->system,
                              params->itb, params->dtb, params->isa[0]);
    thread->setStatus(ThreadContext::Halted);
    tc = thread->getTC();
    threadContexts.push_back(tc);

    setupCounters();
    setupSignalHandler();

    if (params->usePerfOverflow)
        runTimer.reset(new PerfKvmTimer(hwCycles,
                                        KVM_TIMER_SIGNAL,
                                        params->hostFactor,
                                        params->clock));
    else
        runTimer.reset(new PosixKvmTimer(KVM_TIMER_SIGNAL, CLOCK_MONOTONIC,
                                         params->hostFactor,
                                         params->clock));
}

BaseKvmCPU::~BaseKvmCPU()
{
    if (_kvmRun)
        munmap(_kvmRun, vcpuMMapSize);
    close(vcpuFD);
}

void
BaseKvmCPU::init()
{
    BaseCPU::init();

    if (numThreads != 1)
        fatal("KVM: Multithreading not supported");

    tc->initMemProxies(tc);

    // initialize CPU, including PC
    if (FullSystem && !switchedOut())
        TheISA::initCPU(tc, tc->contextId());

    mmio_req.setThreadContext(tc->contextId(), 0);
}

void
BaseKvmCPU::startup()
{
    Kvm &kvm(vm.kvm);

    BaseCPU::startup();

    assert(vcpuFD == -1);

    // Tell the VM that a CPU is about to start.
    vm.cpuStartup();

    // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
    // not guaranteed that the parent KVM VM has initialized at that
    // point. Initialize virtual CPUs here instead.
    vcpuFD = vm.createVCPU(vcpuID);

    // Map the KVM run structure
    vcpuMMapSize = kvm.getVCPUMMapSize();
    _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
                                     PROT_READ | PROT_WRITE, MAP_SHARED,
                                     vcpuFD, 0);
    if (_kvmRun == MAP_FAILED)
        panic("KVM: Failed to map run data structure\n");

    // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
    // available. The offset into the KVM's communication page is
    // provided by the coalesced MMIO capability.
    int mmioOffset(kvm.capCoalescedMMIO());
    if (mmioOffset) {
        inform("KVM: Coalesced IO available\n");
        mmioRing = (struct kvm_coalesced_mmio_ring *)(
            (char *)_kvmRun + (mmioOffset * pageSize));
    } else {
        inform("KVM: Coalesced MMIO not supported by host OS\n");
    }
}

void
BaseKvmCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".committedInsts")
        .desc("Number of instructions committed")
        ;

    numVMExits
        .name(name() + ".numVMExits")
        .desc("total number of KVM exits")
        ;

    numMMIO
        .name(name() + ".numMMIO")
        .desc("number of VM exits due to memory mapped IO")
        ;

    numCoalescedMMIO
        .name(name() + ".numCoalescedMMIO")
        .desc("number of coalesced memory mapped IO requests")
        ;

    numIO
        .name(name() + ".numIO")
        .desc("number of VM exits due to legacy IO")
        ;

    numHalt
        .name(name() + ".numHalt")
        .desc("number of VM exits due to wait for interrupt instructions")
        ;

    numInterrupts
        .name(name() + ".numInterrupts")
        .desc("number of interrupts delivered")
        ;

    numHypercalls
        .name(name() + ".numHypercalls")
        .desc("number of hypercalls")
        ;
}

void
BaseKvmCPU::serializeThread(std::ostream &os, ThreadID tid)
{
    if (DTRACE(Checkpoint)) {
        DPRINTF(Checkpoint, "KVM: Serializing thread %i:\n", tid);
        dump();
    }

    // Update the thread context so we have something to serialize.
    syncThreadContext();

    assert(tid == 0);
    assert(_status == Idle);
    thread->serialize(os);
}

void
BaseKvmCPU::unserializeThread(Checkpoint *cp, const std::string &section,
                              ThreadID tid)
{
    DPRINTF(Checkpoint, "KVM: Unserialize thread %i:\n", tid);

    assert(tid == 0);
    assert(_status == Idle);
    thread->unserialize(cp, section);
    threadContextDirty = true;
}

unsigned int
BaseKvmCPU::drain(DrainManager *dm)
{
    if (switchedOut())
        return 0;

    DPRINTF(Kvm, "drain\n");

    // De-schedule the tick event so we don't insert any more MMIOs
    // into the system while it is draining.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
    return 0;
}

void
BaseKvmCPU::drainResume()
{
    assert(!tickEvent.scheduled());

    // We might have been switched out. In that case, we don't need to
    // do anything.
    if (switchedOut())
        return;

    DPRINTF(Kvm, "drainResume\n");
    verifyMemoryMode();

    // The tick event is de-scheduled as a part of the draining
    // process. Re-schedule it if the thread context is active.
    if (tc->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = Running;
    } else {
        _status = Idle;
    }
}

void
BaseKvmCPU::switchOut()
{
    DPRINTF(Kvm, "switchOut\n");

    // Make sure to update the thread context in case the new CPU
    // needs to access it.
    syncThreadContext();

    BaseCPU::switchOut();

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
}

void
BaseKvmCPU::takeOverFrom(BaseCPU *cpu)
{
    DPRINTF(Kvm, "takeOverFrom\n");

    BaseCPU::takeOverFrom(cpu);

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
    assert(threadContexts.size() == 1);

    // The BaseCPU updated the thread context, make sure that we
    // synchronize it the next time we start the CPU.
    threadContextDirty = true;
}

void
BaseKvmCPU::verifyMemoryMode() const
{
    if (!(system->isAtomicMode() && system->bypassCaches())) {
        fatal("The KVM-based CPUs require the memory system to be in the "
              "'atomic_noncaching' mode.\n");
    }
}

void
BaseKvmCPU::wakeup()
{
    DPRINTF(Kvm, "wakeup()\n");

    if (thread->status() != ThreadContext::Suspended)
        return;

    thread->activate();
}

void
BaseKvmCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(Kvm, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend)
        * hostFactor;

    schedule(tickEvent, clockEdge(delay));
    _status = Running;
}


void
BaseKvmCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Kvm, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // The tick event may not be scheduled if the guest has requested
    // the monitor to wait for interrupts. The normal CPU models can
    // get their tick events descheduled by quiesce instructions, but
    // that can't happen here.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
}

void
BaseKvmCPU::deallocateContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

void
BaseKvmCPU::haltContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

ThreadContext *
BaseKvmCPU::getContext(int tn)
{
    assert(tn == 0);
    syncThreadContext();
    return tc;
}


Counter
BaseKvmCPU::totalInsts() const
{
    return hwInstructions.read();
}

Counter
BaseKvmCPU::totalOps() const
{
    hack_once("Pretending totalOps is equivalent to totalInsts()\n");
    return hwInstructions.read();
}

void
BaseKvmCPU::dump()
{
    inform("State dumping not implemented.");
}

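/* The tick event handler: run the guest in KVM until the next scheduled
 * simulator event (or until the run timer fires), account for the time
 * spent, and convert the VM exit into a delay before the next KVM entry.
 * The CPU is only rescheduled if it is still Running; a halted guest
 * stays descheduled until an interrupt wakes it up. */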
void
BaseKvmCPU::tick()
{
    assert(_status == Running);

    DPRINTF(KvmRun, "Entering KVM...\n");

    Tick ticksToExecute(mainEventQueue.nextTick() - curTick());
    Tick ticksExecuted(kvmRun(ticksToExecute));

    Tick delay(ticksExecuted + handleKvmExit());

    switch (_status) {
      case Running:
        schedule(tickEvent, clockEdge(ticksToCycles(delay)));
        break;

      default:
        /* The CPU is halted or waiting for an interrupt from a
         * device. Don't start it. */
        break;
    }
}

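/* kvmRun() requests 'ticks' worth of guest execution. It synchronizes any
 * dirty thread-context state into KVM, arms the run timer, and issues the
 * KVM_RUN ioctl. On return, the executed host cycles are read from the
 * perf counters and scaled to simulated ticks through the run timer and
 * the host-to-guest frequency factor (hostFactor). The returned value
 * also includes the cost of flushing any coalesced MMIO accesses. */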
Tick
BaseKvmCPU::kvmRun(Tick ticks)
{
    uint64_t baseCycles(hwCycles.read());
    uint64_t baseInstrs(hwInstructions.read());

    // We might need to update the KVM state.
    syncKvmState();
    // Entering into KVM implies that we'll have to reload the thread
    // context from KVM if we want to access it. Flag the KVM state as
    // dirty with respect to the cached thread context.
    kvmStateDirty = true;

    if (ticks < runTimer->resolution()) {
        DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                ticks, runTimer->resolution());
        ticks = runTimer->resolution();
    }

    DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
    timerOverflowed = false;

    // Arm the run timer and start the cycle timer if it isn't
    // controlled by the overflow timer. Starting/stopping the cycle
    // timer automatically starts the other perf timers as they are in
    // the same counter group.
    runTimer->arm(ticks);
    if (!perfControlledByTimer)
        hwCycles.start();

    if (ioctl(KVM_RUN) == -1) {
        if (errno != EINTR)
            panic("KVM: Failed to start virtual CPU (errno: %i)\n",
                  errno);
    }

    runTimer->disarm();
    if (!perfControlledByTimer)
        hwCycles.stop();


    const uint64_t hostCyclesExecuted(hwCycles.read() - baseCycles);
    const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
    const uint64_t instsExecuted(hwInstructions.read() - baseInstrs);
    const Tick ticksExecuted(runTimer->ticksFromHostCycles(hostCyclesExecuted));

    if (ticksExecuted < ticks &&
        timerOverflowed &&
        _kvmRun->exit_reason == KVM_EXIT_INTR) {
        // TODO: We should probably do something clever here...
        warn("KVM: Early timer event, requested %i ticks but got %i ticks.\n",
             ticks, ticksExecuted);
    }

    /* Update statistics */
    numCycles += simCyclesExecuted;
    ++numVMExits;
    numInsts += instsExecuted;

    DPRINTF(KvmRun, "KVM: Executed %i instructions in %i cycles (%i ticks, sim cycles: %i).\n",
            instsExecuted, hostCyclesExecuted, ticksExecuted, simCyclesExecuted);

    return ticksExecuted + flushCoalescedMMIO();
}

void
BaseKvmCPU::kvmNonMaskableInterrupt()
{
    ++numInterrupts;
    if (ioctl(KVM_NMI) == -1)
        panic("KVM: Failed to deliver NMI to virtual CPU\n");
}

void
BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
{
    ++numInterrupts;
    if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
        panic("KVM: Failed to deliver interrupt to virtual CPU\n");
}

void
BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
{
    if (ioctl(KVM_GET_REGS, &regs) == -1)
        panic("KVM: Failed to get guest registers\n");
}

void
BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
{
    if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest registers\n");
}

void
BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
{
    if (ioctl(KVM_GET_SREGS, &regs) == -1)
        panic("KVM: Failed to get guest special registers\n");
}

void
BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
{
    if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest special registers\n");
}

void
BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
{
    if (ioctl(KVM_GET_FPU, &state) == -1)
        panic("KVM: Failed to get guest FPU state\n");
}

void
BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
{
    if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
        panic("KVM: Failed to set guest FPU state\n");
}


void
BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
{
#ifdef KVM_SET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
{
#ifdef KVM_GET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to get register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

std::string
BaseKvmCPU::getAndFormatOneReg(uint64_t id) const
{
#ifdef KVM_GET_ONE_REG
    std::ostringstream ss;

    ss.setf(std::ios::hex, std::ios::basefield);
    ss.setf(std::ios::showbase);
#define HANDLE_INTTYPE(len)                      \
    case KVM_REG_SIZE_U ## len: {                \
        uint ## len ## _t value;                 \
        getOneReg(id, &value);                   \
        ss << value;                             \
    }  break

#define HANDLE_ARRAY(len)                       \
    case KVM_REG_SIZE_U ## len: {               \
        uint8_t value[len / 8];                 \
        getOneReg(id, value);                   \
        ss << "[" << value[0];                  \
        for (int i = 1; i < len / 8; ++i)       \
            ss << ", " << value[i];             \
        ss << "]";                              \
      } break

    switch (id & KVM_REG_SIZE_MASK) {
        HANDLE_INTTYPE(8);
        HANDLE_INTTYPE(16);
        HANDLE_INTTYPE(32);
        HANDLE_INTTYPE(64);
        HANDLE_ARRAY(128);
        HANDLE_ARRAY(256);
        HANDLE_ARRAY(512);
        HANDLE_ARRAY(1024);
      default:
        ss << "??";
    }

#undef HANDLE_INTTYPE
#undef HANDLE_ARRAY

    return ss.str();
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

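/* Thread context <-> KVM state synchronization. The two copies of the
 * architectural state are kept consistent lazily: threadContextDirty
 * means the gem5 thread context has changes that have not been written
 * into KVM yet, and kvmStateDirty means KVM has run and the cached
 * thread context is stale. At most one of the flags may be set at a
 * time, which is what the asserts below enforce. */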
void
BaseKvmCPU::syncThreadContext()
{
    if (!kvmStateDirty)
        return;

    assert(!threadContextDirty);

    updateThreadContext();
    kvmStateDirty = false;
}

void
BaseKvmCPU::syncKvmState()
{
    if (!threadContextDirty)
        return;

    assert(!kvmStateDirty);

    updateKvmState();
    threadContextDirty = false;
}

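/* Dispatch on the exit reason reported in the kvm_run structure. Each
 * handler returns the number of ticks the exit is deemed to have taken;
 * tick() adds this delay to the executed time before re-entering KVM. */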
Tick
BaseKvmCPU::handleKvmExit()
{
    DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);

    switch (_kvmRun->exit_reason) {
      case KVM_EXIT_UNKNOWN:
        return handleKvmExitUnknown();

      case KVM_EXIT_EXCEPTION:
        return handleKvmExitException();

      case KVM_EXIT_IO:
        ++numIO;
        return handleKvmExitIO();

      case KVM_EXIT_HYPERCALL:
        ++numHypercalls;
        return handleKvmExitHypercall();

      case KVM_EXIT_HLT:
        /* The guest has halted and is waiting for interrupts */
        DPRINTF(Kvm, "handleKvmExitHalt\n");
        ++numHalt;

        // Suspend the thread until the next interrupt arrives
        thread->suspend();

        // This is actually ignored since the thread is suspended.
        return 0;

      case KVM_EXIT_MMIO:
        /* Service memory mapped IO requests */
        DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
                _kvmRun->mmio.is_write,
                _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);

        ++numMMIO;
        return doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
                            _kvmRun->mmio.len, _kvmRun->mmio.is_write);

      case KVM_EXIT_IRQ_WINDOW_OPEN:
        return handleKvmExitIRQWindowOpen();

      case KVM_EXIT_FAIL_ENTRY:
        return handleKvmExitFailEntry();

      case KVM_EXIT_INTR:
        /* KVM was interrupted by a signal, restart it in the next
         * tick. */
        return 0;

      case KVM_EXIT_INTERNAL_ERROR:
        panic("KVM: Internal error (suberror: %u)\n",
              _kvmRun->internal.suberror);

      default:
        panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
    }
}

Tick
BaseKvmCPU::handleKvmExitIO()
{
    panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
          _kvmRun->io.direction, _kvmRun->io.size,
          _kvmRun->io.port, _kvmRun->io.count);
}

Tick
BaseKvmCPU::handleKvmExitHypercall()
{
    panic("KVM: Unhandled hypercall\n");
}

Tick
BaseKvmCPU::handleKvmExitIRQWindowOpen()
{
    warn("KVM: Unhandled IRQ window.\n");
    return 0;
}


Tick
BaseKvmCPU::handleKvmExitUnknown()
{
    panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
          _kvmRun->hw.hardware_exit_reason);
}

Tick
BaseKvmCPU::handleKvmExitException()
{
    panic("KVM: Got exception when starting vCPU "
          "(exception: %u, error_code: %u)\n",
          _kvmRun->ex.exception, _kvmRun->ex.error_code);
}

Tick
BaseKvmCPU::handleKvmExitFailEntry()
{
    panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
          _kvmRun->fail_entry.hardware_entry_failure_reason);
}

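/* Translate a KVM MMIO exit into a gem5 memory access. The access is
 * issued as an uncacheable, atomic request on the data port, and the
 * latency reported by the memory system is returned as the cost of the
 * exit. */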
Tick
BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
{
    mmio_req.setPhys(paddr, size, Request::UNCACHEABLE, dataMasterId());

    const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
    Packet pkt(&mmio_req, cmd);
    pkt.dataStatic(data);
    return dataPort.sendAtomic(&pkt);
}

int
BaseKvmCPU::ioctl(int request, long p1) const
{
    if (vcpuFD == -1)
        panic("KVM: CPU ioctl called before initialization\n");

    return ::ioctl(vcpuFD, request, p1);
}

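/* Drain the coalesced MMIO ring buffer shared with KVM. Writes that KVM
 * coalesced while the guest was running are replayed here as ordinary
 * MMIO accesses, and the accumulated latency is returned so kvmRun() can
 * charge it to the executed time. */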
Tick
BaseKvmCPU::flushCoalescedMMIO()
{
    if (!mmioRing)
        return 0;

    DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");

    // TODO: We might need to do synchronization when we start to
    // support multiple CPUs
    Tick ticks(0);
    while (mmioRing->first != mmioRing->last) {
        struct kvm_coalesced_mmio &ent(
            mmioRing->coalesced_mmio[mmioRing->first]);

        DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
                ent.phys_addr, ent.len);

        ++numCoalescedMMIO;
        ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);

        mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
    }

    return ticks;
}

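/* Install the handler for KVM_TIMER_SIGNAL. The signal interrupts the
 * KVM_RUN ioctl (see kvmRun() above, which tolerates EINTR and the
 * KVM_EXIT_INTR exit reason), which is how the run timer bounds the time
 * spent executing in the guest. SA_RESTART is presumably set so that
 * other interrupted system calls are restarted transparently. */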
void
BaseKvmCPU::setupSignalHandler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = onTimerOverflow;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    if (sigaction(KVM_TIMER_SIGNAL, &sa, NULL) == -1)
        panic("KVM: Failed to setup vCPU signal handler\n");
}

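/* Attach the perf counters used to measure guest progress. hwCycles is
 * the pinned group leader; hwInstructions is attached to the same group
 * so that both counters start and stop together. When the run timer is
 * perf-based (perfControlledByTimer), the cycle counter is also
 * configured to raise overflow events, and the placeholder sample period
 * set here is overridden when the timer is armed. */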
void
BaseKvmCPU::setupCounters()
{
    DPRINTF(Kvm, "Attaching cycle counter...\n");
    PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
                                PERF_COUNT_HW_CPU_CYCLES);
    cfgCycles.disabled(true)
        .pinned(true);

    if (perfControlledByTimer) {
        // We need to configure the cycles counter to send overflows
        // since we are going to use it to trigger timer signals that
        // trap back into m5 from KVM. In practice, this means that we
        // need to set some non-zero sample period that gets
        // overridden when the timer is armed.
        cfgCycles.wakeupEvents(1)
            .samplePeriod(42);
    }

    hwCycles.attach(cfgCycles,
                    0); // TID (0 => currentThread)

    DPRINTF(Kvm, "Attaching instruction counter...\n");
    PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
                                      PERF_COUNT_HW_INSTRUCTIONS);
    hwInstructions.attach(cfgInstructions,
                          0, // TID (0 => currentThread)
                          hwCycles);
}