// vgic.cc revision 11005
1/* 2 * Copyright (c) 2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions are 16 * met: redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer; 18 * redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution; 21 * neither the name of the copyright holders nor the names of its 22 * contributors may be used to endorse or promote products derived from 23 * this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 28 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 * 37 * Authors: Matt Evans 38 */ 39 40#include "base/trace.hh" 41#include "debug/Checkpoint.hh" 42#include "debug/VGIC.hh" 43#include "dev/arm/base_gic.hh" 44#include "dev/arm/vgic.hh" 45#include "dev/terminal.hh" 46#include "mem/packet.hh" 47#include "mem/packet_access.hh" 48 49VGic::VGic(const Params *p) 50 : PioDevice(p), platform(p->platform), gic(p->gic), vcpuAddr(p->vcpu_addr), 51 hvAddr(p->hv_addr), pioDelay(p->pio_delay), 52 maintInt(p->ppint) 53{ 54 for (int x = 0; x < VGIC_CPU_MAX; x++) { 55 postVIntEvent[x] = new PostVIntEvent(x, p->platform); 56 maintIntPosted[x] = false; 57 vIntPosted[x] = false; 58 } 59 assert(sys->numRunningContexts() <= VGIC_CPU_MAX); 60} 61 62Tick 63VGic::read(PacketPtr pkt) 64{ 65 Addr addr = pkt->getAddr(); 66 67 if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE) 68 return readVCpu(pkt); 69 else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE) 70 return readCtrl(pkt); 71 else 72 panic("Read to unknown address %#x\n", pkt->getAddr()); 73} 74 75Tick 76VGic::write(PacketPtr pkt) 77{ 78 Addr addr = pkt->getAddr(); 79 80 if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE) 81 return writeVCpu(pkt); 82 else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE) 83 return writeCtrl(pkt); 84 else 85 panic("Write to unknown address %#x\n", pkt->getAddr()); 86} 87 88Tick 89VGic::readVCpu(PacketPtr pkt) 90{ 91 Addr daddr = pkt->getAddr() - vcpuAddr; 92 93 ContextID ctx_id = 
pkt->req->contextId(); 94 assert(ctx_id < VGIC_CPU_MAX); 95 struct vcpuIntData *vid = &vcpuData[ctx_id]; 96 97 DPRINTF(VGIC, "VGIC VCPU read register %#x\n", daddr); 98 99 switch (daddr) { 100 case GICV_CTLR: 101 pkt->set<uint32_t>(vid->vctrl); 102 break; 103 case GICV_IAR: { 104 int i = findHighestPendingLR(vid); 105 if (i < 0 || !vid->vctrl.En) { 106 pkt->set<uint32_t>(1023); // "No int" marker 107 } else { 108 ListReg *lr = &vid->LR[i]; 109 110 pkt->set<uint32_t>(lr->VirtualID | 111 (((int)lr->CpuID) << 10)); 112 // We don't support auto-EOI of HW interrupts via real GIC! 113 // Fortunately, KVM doesn't use this. How about Xen...? Ulp! 114 if (lr->HW) 115 panic("VGIC does not support 'HW' List Register feature (LR %#x)!\n", 116 *lr); 117 lr->State = LR_ACTIVE; 118 DPRINTF(VGIC, "Consumed interrupt %d (cpu%d) from LR%d (EOI%d)\n", 119 lr->VirtualID, lr->CpuID, i, lr->EOI); 120 } 121 } break; 122 default: 123 panic("VGIC VCPU read of bad address %#x\n", daddr); 124 } 125 126 updateIntState(ctx_id); 127 128 pkt->makeAtomicResponse(); 129 return pioDelay; 130} 131 132Tick 133VGic::readCtrl(PacketPtr pkt) 134{ 135 Addr daddr = pkt->getAddr() - hvAddr; 136 137 ContextID ctx_id = pkt->req->contextId(); 138 139 DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr); 140 141 /* Munge the address: 0-0xfff is the usual space banked by requester CPU. 142 * Anything > that is 0x200-sized slices of 'per CPU' regs. 
143 */ 144 if (daddr & ~0x1ff) { 145 ctx_id = (daddr >> 9); 146 if (ctx_id > 8) 147 panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr); 148 daddr &= ~0x1ff; 149 } 150 assert(ctx_id < VGIC_CPU_MAX); 151 struct vcpuIntData *vid = &vcpuData[ctx_id]; 152 153 switch (daddr) { 154 case GICH_HCR: 155 pkt->set<uint32_t>(vid->hcr); 156 break; 157 158 case GICH_VTR: 159 pkt->set<uint32_t>(0x44000000 | (NUM_LR - 1)); 160 break; 161 162 case GICH_VMCR: 163 pkt->set<uint32_t>( 164 ((uint32_t)vid->VMPriMask << 27) | 165 ((uint32_t)vid->VMBP << 21) | 166 ((uint32_t)vid->VMABP << 18) | 167 ((uint32_t)vid->VEM << 9) | 168 ((uint32_t)vid->VMCBPR << 4) | 169 ((uint32_t)vid->VMFiqEn << 3) | 170 ((uint32_t)vid->VMAckCtl << 2) | 171 ((uint32_t)vid->VMGrp1En << 1) | 172 ((uint32_t)vid->VMGrp0En << 0) 173 ); 174 break; 175 176 case GICH_MISR: 177 pkt->set<uint32_t>(getMISR(vid)); 178 break; 179 180 case GICH_EISR0: 181 pkt->set<uint32_t>(vid->eisr & 0xffffffff); 182 break; 183 184 case GICH_EISR1: 185 pkt->set<uint32_t>(vid->eisr >> 32); 186 break; 187 188 case GICH_ELSR0: { 189 uint32_t bm = 0; 190 for (int i = 0; i < ((NUM_LR < 32) ? 
NUM_LR : 32); i++) { 191 if (!vid->LR[i].State) 192 bm |= 1 << i; 193 } 194 pkt->set<uint32_t>(bm); 195 } break; 196 197 case GICH_ELSR1: { 198 uint32_t bm = 0; 199 for (int i = 32; i < NUM_LR; i++) { 200 if (!vid->LR[i].State) 201 bm |= 1 << (i-32); 202 } 203 pkt->set<uint32_t>(bm); 204 } break; 205 206 case GICH_APR0: 207 warn_once("VGIC GICH_APR read!\n"); 208 pkt->set<uint32_t>(0); 209 break; 210 211 case GICH_LR0: 212 case GICH_LR1: 213 case GICH_LR2: 214 case GICH_LR3: 215 pkt->set<uint32_t>(vid->LR[(daddr - GICH_LR0) >> 2]); 216 break; 217 218 default: 219 panic("VGIC HVCtrl read of bad address %#x\n", daddr); 220 } 221 222 pkt->makeAtomicResponse(); 223 return pioDelay; 224} 225 226Tick 227VGic::writeVCpu(PacketPtr pkt) 228{ 229 Addr daddr = pkt->getAddr() - vcpuAddr; 230 231 ContextID ctx_id = pkt->req->contextId(); 232 assert(ctx_id < VGIC_CPU_MAX); 233 struct vcpuIntData *vid = &vcpuData[ctx_id]; 234 235 DPRINTF(VGIC, "VGIC VCPU write register %#x <= %#x\n", daddr, pkt->get<uint32_t>()); 236 237 switch (daddr) { 238 case GICV_CTLR: 239 vid->vctrl = pkt->get<uint32_t>(); 240 break; 241 case GICV_PMR: 242 vid->VMPriMask = pkt->get<uint32_t>(); 243 break; 244 case GICV_EOIR: { 245 // We don't handle the split EOI-then-DIR mode. Linux (guest) 246 // doesn't need it though. 247 assert(!vid->vctrl.EOImode); 248 uint32_t w = pkt->get<uint32_t>(); 249 unsigned int virq = w & 0x3ff; 250 unsigned int vcpu = (w >> 10) & 7; 251 int i = findLRForVIRQ(vid, virq, vcpu); 252 if (i < 0) { 253 DPRINTF(VGIC, "EOIR: No LR for irq %d(cpu%d)\n", virq, vcpu); 254 } else { 255 DPRINTF(VGIC, "EOIR: Found LR%d for irq %d(cpu%d)\n", i, virq, vcpu); 256 ListReg *lr = &vid->LR[i]; 257 lr->State = 0; 258 // Maintenance interrupt -- via eisr -- is flagged when 259 // LRs have EOI=1 and State=INVALID! 
260 } 261 } break; 262 default: 263 panic("VGIC VCPU write %#x to unk address %#x\n", pkt->get<uint32_t>(), daddr); 264 } 265 266 // This updates the EISRs and flags IRQs: 267 updateIntState(ctx_id); 268 269 pkt->makeAtomicResponse(); 270 return pioDelay; 271} 272 273Tick 274VGic::writeCtrl(PacketPtr pkt) 275{ 276 Addr daddr = pkt->getAddr() - hvAddr; 277 278 ContextID ctx_id = pkt->req->contextId(); 279 280 DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n", daddr, pkt->get<uint32_t>()); 281 282 /* Munge the address: 0-0xfff is the usual space banked by requester CPU. 283 * Anything > that is 0x200-sized slices of 'per CPU' regs. 284 */ 285 if (daddr & ~0x1ff) { 286 ctx_id = (daddr >> 9); 287 if (ctx_id > 8) 288 panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr); 289 daddr &= ~0x1ff; 290 } 291 assert(ctx_id < VGIC_CPU_MAX); 292 struct vcpuIntData *vid = &vcpuData[ctx_id]; 293 294 switch (daddr) { 295 case GICH_HCR: 296 vid->hcr = pkt->get<uint32_t>(); 297 // update int state 298 break; 299 300 case GICH_VMCR: { 301 uint32_t d = pkt->get<uint32_t>(); 302 vid->VMPriMask = d >> 27; 303 vid->VMBP = (d >> 21) & 7; 304 vid->VMABP = (d >> 18) & 7; 305 vid->VEM = (d >> 9) & 1; 306 vid->VMCBPR = (d >> 4) & 1; 307 vid->VMFiqEn = (d >> 3) & 1; 308 vid->VMAckCtl = (d >> 2) & 1; 309 vid->VMGrp1En = (d >> 1) & 1; 310 vid->VMGrp0En = d & 1; 311 } break; 312 313 case GICH_APR0: 314 warn_once("VGIC GICH_APR0 written, ignored\n"); 315 break; 316 317 case GICH_LR0: 318 case GICH_LR1: 319 case GICH_LR2: 320 case GICH_LR3: 321 vid->LR[(daddr - GICH_LR0) >> 2] = pkt->get<uint32_t>(); 322 // update int state 323 break; 324 325 default: 326 panic("VGIC HVCtrl write to bad address %#x\n", daddr); 327 } 328 329 updateIntState(ctx_id); 330 331 pkt->makeAtomicResponse(); 332 return pioDelay; 333} 334 335 336uint32_t 337VGic::getMISR(struct vcpuIntData *vid) 338{ 339 return (!!vid->hcr.VGrp1DIE && !vid->VMGrp1En ? 0x80 : 0) | 340 (!!vid->hcr.VGrp1EIE && vid->VMGrp1En ? 
0x40 : 0) | 341 (!!vid->hcr.VGrp0DIE && !vid->VMGrp0En ? 0x20 : 0) | 342 (!!vid->hcr.VGrp0EIE && vid->VMGrp0En ? 0x10 : 0) | 343 (!!vid->hcr.NPIE && !lrPending(vid) ? 0x08 : 0) | 344 (!!vid->hcr.LRENPIE && vid->hcr.EOICount ? 0x04 : 0) | 345 (!!vid->hcr.UIE && lrValid(vid) <= 1 ? 0x02 : 0) | 346 (vid->eisr ? 0x01 : 0); 347} 348 349void 350VGic::postVInt(uint32_t cpu, Tick when) 351{ 352 DPRINTF(VGIC, "Posting VIRQ to %d\n", cpu); 353 if (!(postVIntEvent[cpu]->scheduled())) 354 eventq->schedule(postVIntEvent[cpu], when); 355} 356 357void 358VGic::unPostVInt(uint32_t cpu) 359{ 360 DPRINTF(VGIC, "Unposting VIRQ to %d\n", cpu); 361 platform->intrctrl->clear(cpu, ArmISA::INT_VIRT_IRQ, 0); 362} 363 364void 365VGic::postMaintInt(uint32_t cpu) 366{ 367 DPRINTF(VGIC, "Posting maintenance PPI to GIC/cpu%d\n", cpu); 368 // Linux DT configures this as Level. 369 gic->sendPPInt(maintInt, cpu); 370} 371 372void 373VGic::unPostMaintInt(uint32_t cpu) 374{ 375 DPRINTF(VGIC, "Unposting maintenance PPI to GIC/cpu%d\n", cpu); 376 gic->clearPPInt(maintInt, cpu); 377} 378 379/* Update state (in general); something concerned with ctx_id has changed. 380 * This may raise a maintenance interrupt. 381 */ 382void 383VGic::updateIntState(ContextID ctx_id) 384{ 385 // @todo This should update APRs! 386 387 // Build EISR contents: 388 // (Cached so that regs can read them without messing about again) 389 struct vcpuIntData *tvid = &vcpuData[ctx_id]; 390 391 tvid->eisr = 0; 392 for (int i = 0; i < NUM_LR; i++) { 393 if (!tvid->LR[i].State && tvid->LR[i].EOI) { 394 tvid->eisr |= 1 << i; 395 } 396 } 397 398 assert(sys->numRunningContexts() <= VGIC_CPU_MAX); 399 for (int i = 0; i < sys->numRunningContexts(); i++) { 400 struct vcpuIntData *vid = &vcpuData[i]; 401 // Are any LRs active that weren't before? 
402 if (!vIntPosted[i]) { 403 if (lrPending(vid) && vid->vctrl.En) { 404 vIntPosted[i] = true; 405 postVInt(i, curTick() + 1); 406 } 407 } else if (!lrPending(vid)) { 408 vIntPosted[i] = false; 409 unPostVInt(i); 410 } 411 412 // Any maintenance ints to send? 413 if (!maintIntPosted[i]) { 414 if (vid->hcr.En && getMISR(vid)) { 415 maintIntPosted[i] = true; 416 postMaintInt(i); 417 } 418 } else { 419 if (!vid->hcr.En || !getMISR(vid)) { 420 unPostMaintInt(i); 421 maintIntPosted[i] = false; 422 } 423 } 424 } 425} 426 427AddrRangeList 428VGic::getAddrRanges() const 429{ 430 AddrRangeList ranges; 431 ranges.push_back(RangeSize(hvAddr, GICH_REG_SIZE)); 432 ranges.push_back(RangeSize(vcpuAddr, GICV_SIZE)); 433 return ranges; 434} 435 436void 437VGic::serialize(CheckpointOut &cp) const 438{ 439 Tick interrupt_time[VGIC_CPU_MAX]; 440 for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) { 441 interrupt_time[cpu] = 0; 442 if (postVIntEvent[cpu]->scheduled()) { 443 interrupt_time[cpu] = postVIntEvent[cpu]->when(); 444 } 445 } 446 447 DPRINTF(Checkpoint, "Serializing VGIC\n"); 448 449 SERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX); 450 SERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX); 451 SERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX); 452 SERIALIZE_SCALAR(vcpuAddr); 453 SERIALIZE_SCALAR(hvAddr); 454 SERIALIZE_SCALAR(pioDelay); 455 SERIALIZE_SCALAR(maintInt); 456 457 for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) 458 vcpuData[cpu].serializeSection(cp, csprintf("vcpuData%d", cpu)); 459} 460 461void 462VGic::vcpuIntData::serialize(CheckpointOut &cp) const 463{ 464 uint32_t vctrl_val = vctrl; 465 SERIALIZE_SCALAR(vctrl_val); 466 uint32_t hcr_val = hcr; 467 SERIALIZE_SCALAR(hcr_val); 468 uint64_t eisr_val = eisr; 469 SERIALIZE_SCALAR(eisr_val); 470 uint8_t VMGrp0En_val = VMGrp0En; 471 SERIALIZE_SCALAR(VMGrp0En_val); 472 uint8_t VMGrp1En_val = VMGrp1En; 473 SERIALIZE_SCALAR(VMGrp1En_val); 474 uint8_t VMAckCtl_val = VMAckCtl; 475 SERIALIZE_SCALAR(VMAckCtl_val); 476 uint8_t VMFiqEn_val = 
VMFiqEn; 477 SERIALIZE_SCALAR(VMFiqEn_val); 478 uint8_t VMCBPR_val = VMCBPR; 479 SERIALIZE_SCALAR(VMCBPR_val); 480 uint8_t VEM_val = VEM; 481 SERIALIZE_SCALAR(VEM_val); 482 uint8_t VMABP_val = VMABP; 483 SERIALIZE_SCALAR(VMABP_val); 484 uint8_t VMBP_val = VMBP; 485 SERIALIZE_SCALAR(VMBP_val); 486 uint8_t VMPriMask_val = VMPriMask; 487 SERIALIZE_SCALAR(VMPriMask_val); 488 489 for (int i = 0; i < NUM_LR; i++) { 490 ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i)); 491 paramOut(cp, "lr", LR[i]); 492 } 493} 494 495void VGic::unserialize(CheckpointIn &cp) 496{ 497 DPRINTF(Checkpoint, "Unserializing Arm GIC\n"); 498 499 Tick interrupt_time[VGIC_CPU_MAX]; 500 UNSERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX); 501 for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) { 502 if (interrupt_time[cpu]) 503 schedule(postVIntEvent[cpu], interrupt_time[cpu]); 504 505 vcpuData[cpu].unserializeSection(cp, csprintf("vcpuData%d", cpu)); 506 } 507 UNSERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX); 508 UNSERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX); 509 UNSERIALIZE_SCALAR(vcpuAddr); 510 UNSERIALIZE_SCALAR(hvAddr); 511 UNSERIALIZE_SCALAR(pioDelay); 512 UNSERIALIZE_SCALAR(maintInt); 513} 514 515void 516VGic::vcpuIntData::unserialize(CheckpointIn &cp) 517{ 518 paramIn(cp, "vctrl_val", vctrl); 519 paramIn(cp, "hcr_val", hcr); 520 paramIn(cp, "eisr_val", eisr); 521 paramIn(cp, "VMGrp0En_val", VMGrp0En); 522 paramIn(cp, "VMGrp1En_val", VMGrp1En); 523 paramIn(cp, "VMAckCtl_val", VMAckCtl); 524 paramIn(cp, "VMFiqEn_val", VMFiqEn); 525 paramIn(cp, "VMCBPR_val", VMCBPR); 526 paramIn(cp, "VEM_val", VEM); 527 paramIn(cp, "VMABP_val", VMABP); 528 paramIn(cp, "VMPriMask_val", VMPriMask); 529 530 for (int i = 0; i < NUM_LR; i++) { 531 ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i)); 532 paramIn(cp, "lr", LR[i]); 533 } 534} 535 536VGic * 537VGicParams::create() 538{ 539 return new VGic(this); 540} 541