/*
 * Copyright (c) 2012, 2015, 2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 167 unchanged lines hidden ---

        ? RunningMMIOPending : RunningServiceCompletion;
}
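// Submit an MMIO packet from the virtual CPU to the memory system. In
// atomic mode the access completes immediately and its latency is
// returned. In timing mode the packet is sent right away if no earlier
// MMIO packets are outstanding; otherwise it is queued for a later
// retry, and the return value is ignored.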
Tick
BaseKvmCPU::KVMCpuPort::submitIO(PacketPtr pkt)
{
    if (cpu->system->isAtomicMode()) {
        Tick delay = sendAtomic(pkt);
        // The associated request is reference counted and goes away
        // with the packet.
        delete pkt;
        return delay;
    } else {
        if (pendingMMIOPkts.empty() && sendTimingReq(pkt)) {
            activeMMIOReqs++;
        } else {
            pendingMMIOPkts.push(pkt);
        }
        // Return value is irrelevant for timing-mode accesses.
        return 0;
    }
}
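// Handle the response for an outstanding timing-mode MMIO request. Once
// all queued and in-flight MMIO accesses have completed, the CPU is
// notified through finishMMIOPending() so it can re-enter KVM.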
bool
BaseKvmCPU::KVMCpuPort::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(KvmIO, "KVM: Finished timing request\n");

    delete pkt;
    activeMMIOReqs--;

    // We can switch back into KVM when all pending and in-flight MMIO
    // operations have completed.
    if (!(activeMMIOReqs || pendingMMIOPkts.size())) {
        DPRINTF(KvmIO, "KVM: Finished all outstanding timing requests\n");
        cpu->finishMMIOPending();

--- 902 unchanged lines hidden ---

}
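// Inject a memory-mapped I/O access from the virtual CPU into the
// simulated memory system and return the latency of the access in
// ticks. Accesses to memory-mapped IPRs (currently only used for
// m5ops) are handled locally; everything else is turned into a packet
// and submitted through the data port.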
Tick
BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
{
    ThreadContext *tc(thread->getTC());
    syncThreadContext();
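    // Build an uncacheable request for the MMIO access. RequestPtr is
    // reference counted, so the request does not need to be freed
    // explicitly when the packet carrying it is destroyed.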
    RequestPtr mmio_req = std::make_shared<Request>(
        paddr, size, Request::UNCACHEABLE, dataMasterId());

    mmio_req->setContext(tc->contextId());
    // Some architectures do need to massage physical addresses a bit
    // before they are inserted into the memory system. This enables
    // APIC accesses on x86 and m5ops where supported through a MMIO
    // interface.
    BaseTLB::Mode tlb_mode(write ? BaseTLB::Write : BaseTLB::Read);
    Fault fault(tc->getDTBPtr()->finalizePhysical(mmio_req, tc, tlb_mode));
    if (fault != NoFault)

--- 7 unchanged lines hidden ---

    if (mmio_req->isMmappedIpr()) {
        // We currently assume that there is no need to migrate to a
        // different event queue when doing IPRs. Currently, IPRs are
        // only used for m5ops, so it should be a valid assumption.
        const Cycles ipr_delay(write ?
                               TheISA::handleIprWrite(tc, pkt) :
                               TheISA::handleIprRead(tc, pkt));
        threadContextDirty = true;
        delete pkt;
        return clockPeriod() * ipr_delay;
    } else {
        // Temporarily lock and migrate to the device event queue to
        // prevent races in multi-core mode.
        EventQueue::ScopedMigration migrate(deviceEventQueue());

        return dataPort.submitIO(pkt);

--- 245 unchanged lines hidden ---