x86_cpu.cc (10113:f02b907bb9e8) → x86_cpu.cc (10157:5c2ecad1a3c9)
/*
 * Copyright (c) 2013 Andreas Sandberg
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 1120 unchanged lines hidden (view full) ---


        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}
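
For context, the loop above writes each MSR value returned by KVM back into gem5's thread context; X86ISA::msrMap translates a hardware MSR index into a gem5 misc-register index, and the .at() lookup throws for MSRs without a mapping, so unsupported MSRs fail loudly instead of silently corrupting state. A minimal self-contained sketch of that lookup pattern, with a hypothetical map and illustrative indices (not gem5's actual table):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <unordered_map>

int main()
{
    // Hypothetical miniature of the msrMap pattern: keys are MSR
    // indices, values stand in for gem5 misc-register indices.
    const std::unordered_map<uint32_t, int> msrMap{
        {0xC0000080, 1},  // MSR_EFER -> illustrative misc-reg index
    };

    try {
        std::cout << "EFER maps to misc reg "
                  << msrMap.at(0xC0000080) << "\n";
        msrMap.at(0x12345678);  // unknown MSR index: throws
    } catch (const std::out_of_range &) {
        std::cout << "unknown MSR rejected\n";
    }
    return 0;
}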

void
X86KvmCPU::deliverInterrupts()
{
-    syncThreadContext();
-
-    Fault fault(interrupts->getInterrupt(tc));
-    interrupts->updateIntrInfo(tc);
+    Fault fault;
+
+    syncThreadContext();
+
+    {
+        // Migrate to the interrupt controller's thread to get the
+        // interrupt. Even though the individual methods are safe to
+        // call across threads, we might still lose interrupts unless
+        // getInterrupt() and updateIntrInfo() are called atomically.
+        EventQueue::ScopedMigration migrate(interrupts->eventQueue());
+        fault = interrupts->getInterrupt(tc);
+        interrupts->updateIntrInfo(tc);
+    }

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);

--- 186 unchanged lines hidden (view full) ---
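
The scoped block in the hunk above relies on EventQueue::ScopedMigration, gem5's RAII helper for temporarily servicing another thread's event queue. A minimal sketch of the idiom, using a simplified stand-in EventQueue: the mutex here is illustrative, and the real helper also swaps the thread's notion of the current queue and unlocks the old one, which this sketch omits.

#include <mutex>

// Simplified stand-in for gem5's event queue; only the locking that
// ScopedMigration needs is modelled here.
class EventQueue
{
  public:
    void lock() { mtx.lock(); }
    void unlock() { mtx.unlock(); }

    // RAII helper: while an instance is alive, the constructing
    // thread holds the target queue's lock, so back-to-back calls
    // (here, getInterrupt() followed by updateIntrInfo()) cannot be
    // interleaved with other activity on that queue.
    class ScopedMigration
    {
      public:
        explicit ScopedMigration(EventQueue *eq) : queue(*eq)
        {
            queue.lock();
        }
        ~ScopedMigration() { queue.unlock(); }

        // Non-copyable: the lock must be released exactly once.
        ScopedMigration(const ScopedMigration &) = delete;
        ScopedMigration &operator=(const ScopedMigration &) = delete;

      private:
        EventQueue &queue;
    };

  private:
    std::mutex mtx;
};

With a helper like this, the calls that must not interleave are simply placed in a block whose first statement constructs the migration object; the destructor releases the queue when the block exits, even on exceptions.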

    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    io_req.setPhys(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
                   dataMasterId());

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
+    // Temporarily lock and migrate to the event queue of the
+    // VM. This queue is assumed to "own" all devices we need to
+    // access if running in multi-core mode.
+    EventQueue::ScopedMigration migrate(vm.eventQueue());
    for (int i = 0; i < count; ++i) {
        Packet pkt(&io_req, cmd);

        pkt.dataStatic(guestData);
        delay += dataPort.sendAtomic(&pkt);

        guestData += kvm_run.io.size;
    }

--- 265 unchanged lines hidden ---
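
The second hunk applies the same helper to the port-IO loop. A usage sketch reusing the EventQueue stand-in above, with hypothetical Packet and Port types: the migration is taken once, outside the loop, so the device-owning queue stays locked for the entire PIO burst rather than being re-acquired per packet.

#include <cstdint>

// Hypothetical stand-ins for the packet and port types; only what the
// loop below touches is modelled.
struct Packet { const uint8_t *data; unsigned size; };

struct Port
{
    // Pretend atomic access returning a modelled latency.
    uint64_t sendAtomic(const Packet &pkt) { return pkt.size; }
};

// Mirrors the hunk above: one ScopedMigration covers the whole burst.
uint64_t
doPioBurst(EventQueue *vmQueue, Port &port,
           const uint8_t *guestData, unsigned size, int count)
{
    EventQueue::ScopedMigration migrate(vmQueue);

    uint64_t delay = 0;
    for (int i = 0; i < count; ++i) {
        Packet pkt{guestData, size};
        delay += port.sendAtomic(pkt);
        guestData += size;
    }
    return delay;
}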