/*
 * Copyright (c) 2013,2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Matt Evans
 */

#include "dev/arm/vgic.hh"

#include "base/trace.hh"
#include "debug/Checkpoint.hh"
#include "debug/VGIC.hh"
#include "dev/arm/base_gic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"

VGic::VGic(const Params *p)
    : PioDevice(p), gicvIIDR(p->gicv_iidr), platform(p->platform),
      gic(p->gic), vcpuAddr(p->vcpu_addr), hvAddr(p->hv_addr),
      pioDelay(p->pio_delay), maintInt(p->maint_int)
{
    for (int x = 0; x < VGIC_CPU_MAX; x++) {
        postVIntEvent[x] = new EventFunctionWrapper(
            [this, x]{ processPostVIntEvent(x); },
            "Post VInterrupt to CPU");
        maintIntPosted[x] = false;
        vIntPosted[x] = false;
    }
    assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
}

VGic::~VGic()
{
    for (int x = 0; x < VGIC_CPU_MAX; x++)
        delete postVIntEvent[x];
}

Tick
VGic::read(PacketPtr pkt)
{
    Addr addr = pkt->getAddr();

    if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
        return readVCpu(pkt);
    else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
        return readCtrl(pkt);
    else
        panic("Read to unknown address %#x\n", pkt->getAddr());
}

Tick
VGic::write(PacketPtr pkt)
{
    Addr addr = pkt->getAddr();

    if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
        return writeVCpu(pkt);
    else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
        return writeCtrl(pkt);
    else
        panic("Write to unknown address %#x\n", pkt->getAddr());
}

Tick
VGic::readVCpu(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - vcpuAddr;

    ContextID ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU read register %#x\n", daddr);

    switch (daddr) {
      case GICV_CTLR:
        pkt->setLE<uint32_t>(vid->vctrl);
        break;
      case GICV_IAR: {
          int i = findHighestPendingLR(vid);
          if (i < 0 || !vid->vctrl.En) {
              pkt->setLE<uint32_t>(1023); // "No int" marker
          } else {
              ListReg *lr = &vid->LR[i];

              pkt->setLE<uint32_t>(lr->VirtualID |
                                   (((int)lr->CpuID) << 10));
              // We don't support auto-EOI of HW interrupts via real GIC!
              // Fortunately, KVM doesn't use this. How about Xen...? Ulp!
              if (lr->HW)
                  panic("VGIC does not support 'HW' List Register feature (LR %#x)!\n",
                        *lr);
              lr->State = LR_ACTIVE;
              DPRINTF(VGIC, "Consumed interrupt %d (cpu%d) from LR%d (EOI%d)\n",
                      lr->VirtualID, lr->CpuID, i, lr->EOI);
          }
      } break;
      case GICV_IIDR:
        pkt->setLE<uint32_t>(gicvIIDR);
        break;
      default:
        panic("VGIC VCPU read of bad address %#x\n", daddr);
    }

    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::readCtrl(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - hvAddr;

    ContextID ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr);

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
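    // Decode note (sketch of the scheme above): for the unbanked aliases
    // handled below, the target context comes from daddr bits [12:9], one
    // 0x200-sized slice per CPU; e.g. a daddr of 0x600 selects context 3
    // (0x600 >> 9 == 3), with the register offset taken from the munged daddr.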
    if (daddr & ~0x1ff) {
        ctx_id = (daddr >> 9);
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        pkt->setLE<uint32_t>(vid->hcr);
        break;

      case GICH_VTR:
        pkt->setLE<uint32_t>(0x44000000 | (NUM_LR - 1));
        break;

      case GICH_VMCR:
        pkt->setLE<uint32_t>(
            ((uint32_t)vid->VMPriMask << 27) |
            ((uint32_t)vid->VMBP << 21) |
            ((uint32_t)vid->VMABP << 18) |
            ((uint32_t)vid->VEM << 9) |
            ((uint32_t)vid->VMCBPR << 4) |
            ((uint32_t)vid->VMFiqEn << 3) |
            ((uint32_t)vid->VMAckCtl << 2) |
            ((uint32_t)vid->VMGrp1En << 1) |
            ((uint32_t)vid->VMGrp0En << 0)
            );
        break;

      case GICH_MISR:
        pkt->setLE<uint32_t>(getMISR(vid));
        break;

      case GICH_EISR0:
        pkt->setLE<uint32_t>(vid->eisr & 0xffffffff);
        break;

      case GICH_EISR1:
        pkt->setLE<uint32_t>(vid->eisr >> 32);
        break;

      case GICH_ELSR0: {
          uint32_t bm = 0;
          for (int i = 0; i < ((NUM_LR < 32) ? NUM_LR : 32); i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << i;
          }
          pkt->setLE<uint32_t>(bm);
      } break;

      case GICH_ELSR1: {
          uint32_t bm = 0;
          for (int i = 32; i < NUM_LR; i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << (i - 32);
          }
          pkt->setLE<uint32_t>(bm);
      } break;

      case GICH_APR0:
        warn_once("VGIC GICH_APR read!\n");
        pkt->setLE<uint32_t>(0);
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        pkt->setLE<uint32_t>(vid->LR[(daddr - GICH_LR0) >> 2]);
        break;

      default:
        panic("VGIC HVCtrl read of bad address %#x\n", daddr);
    }

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::writeVCpu(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - vcpuAddr;

    ContextID ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU write register %#x <= %#x\n",
            daddr, pkt->getLE<uint32_t>());

    switch (daddr) {
      case GICV_CTLR:
        vid->vctrl = pkt->getLE<uint32_t>();
        break;
      case GICV_PMR:
        vid->VMPriMask = pkt->getLE<uint32_t>();
        break;
      case GICV_EOIR: {
          // We don't handle the split EOI-then-DIR mode. Linux (guest)
          // doesn't need it though.
          assert(!vid->vctrl.EOImode);
          uint32_t w = pkt->getLE<uint32_t>();
          unsigned int virq = w & 0x3ff;
          unsigned int vcpu = (w >> 10) & 7;
          int i = findLRForVIRQ(vid, virq, vcpu);
          if (i < 0) {
              DPRINTF(VGIC, "EOIR: No LR for irq %d(cpu%d)\n", virq, vcpu);
          } else {
              DPRINTF(VGIC, "EOIR: Found LR%d for irq %d(cpu%d)\n", i, virq, vcpu);
              ListReg *lr = &vid->LR[i];
              lr->State = 0;
              // Maintenance interrupt -- via eisr -- is flagged when
              // LRs have EOI=1 and State=INVALID!
          }
      } break;
      default:
        panic("VGIC VCPU write %#x to unk address %#x\n",
              pkt->getLE<uint32_t>(), daddr);
    }

    // This updates the EISRs and flags IRQs:
    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::writeCtrl(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - hvAddr;

    ContextID ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n",
            daddr, pkt->getLE<uint32_t>());

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
    if (daddr & ~0x1ff) {
        ctx_id = (daddr >> 9);
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        vid->hcr = pkt->getLE<uint32_t>();
        // update int state
        break;

      case GICH_VMCR: {
          uint32_t d = pkt->getLE<uint32_t>();
          vid->VMPriMask = d >> 27;
          vid->VMBP = (d >> 21) & 7;
          vid->VMABP = (d >> 18) & 7;
          vid->VEM = (d >> 9) & 1;
          vid->VMCBPR = (d >> 4) & 1;
          vid->VMFiqEn = (d >> 3) & 1;
          vid->VMAckCtl = (d >> 2) & 1;
          vid->VMGrp1En = (d >> 1) & 1;
          vid->VMGrp0En = d & 1;
      } break;

      case GICH_APR0:
        warn_once("VGIC GICH_APR0 written, ignored\n");
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        vid->LR[(daddr - GICH_LR0) >> 2] = pkt->getLE<uint32_t>();
        // update int state
        break;

      default:
        panic("VGIC HVCtrl write to bad address %#x\n", daddr);
    }

    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}


uint32_t
VGic::getMISR(struct vcpuIntData *vid)
{
    return (!!vid->hcr.VGrp1DIE && !vid->VMGrp1En ? 0x80 : 0) |
           (!!vid->hcr.VGrp1EIE &&  vid->VMGrp1En ? 0x40 : 0) |
           (!!vid->hcr.VGrp0DIE && !vid->VMGrp0En ? 0x20 : 0) |
           (!!vid->hcr.VGrp0EIE &&  vid->VMGrp0En ? 0x10 : 0) |
           (!!vid->hcr.NPIE && !lrPending(vid) ? 0x08 : 0) |
           (!!vid->hcr.LRENPIE && vid->hcr.EOICount ? 0x04 : 0) |
           (!!vid->hcr.UIE && lrValid(vid) <= 1 ? 0x02 : 0) |
           (vid->eisr ? 0x01 : 0);
}

void
VGic::postVInt(uint32_t cpu, Tick when)
{
    DPRINTF(VGIC, "Posting VIRQ to %d\n", cpu);
    if (!(postVIntEvent[cpu]->scheduled()))
        eventq->schedule(postVIntEvent[cpu], when);
}

void
VGic::unPostVInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Unposting VIRQ to %d\n", cpu);
    platform->intrctrl->clear(cpu, ArmISA::INT_VIRT_IRQ, 0);
}

void
VGic::processPostVIntEvent(uint32_t cpu)
{
    platform->intrctrl->post(cpu, ArmISA::INT_VIRT_IRQ, 0);
}


void
VGic::postMaintInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Posting maintenance PPI to GIC/cpu%d\n", cpu);
    // Linux DT configures this as Level.
    gic->sendPPInt(maintInt, cpu);
}

void
VGic::unPostMaintInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Unposting maintenance PPI to GIC/cpu%d\n", cpu);
    gic->clearPPInt(maintInt, cpu);
}

/* Update state (in general); something concerned with ctx_id has changed.
 * This may raise a maintenance interrupt.
 */
void
VGic::updateIntState(ContextID ctx_id)
{
    // @todo This should update APRs!

    // Build EISR contents:
    // (Cached so that regs can read them without messing about again)
    struct vcpuIntData *tvid = &vcpuData[ctx_id];

    tvid->eisr = 0;
    for (int i = 0; i < NUM_LR; i++) {
        if (!tvid->LR[i].State && tvid->LR[i].EOI) {
            tvid->eisr |= 1 << i;
        }
    }

    assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
    for (int i = 0; i < sys->numRunningContexts(); i++) {
        struct vcpuIntData *vid = &vcpuData[i];
        // Are any LRs active that weren't before?
        if (!vIntPosted[i]) {
            if (lrPending(vid) && vid->vctrl.En) {
                vIntPosted[i] = true;
                postVInt(i, curTick() + 1);
            }
        } else if (!lrPending(vid)) {
            vIntPosted[i] = false;
            unPostVInt(i);
        }

        // Any maintenance ints to send?
        if (!maintIntPosted[i]) {
            if (vid->hcr.En && getMISR(vid)) {
                maintIntPosted[i] = true;
                postMaintInt(i);
            }
        } else {
            if (!vid->hcr.En || !getMISR(vid)) {
                unPostMaintInt(i);
                maintIntPosted[i] = false;
            }
        }
    }
}

AddrRangeList
VGic::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(RangeSize(hvAddr, GICH_REG_SIZE));
    ranges.push_back(RangeSize(vcpuAddr, GICV_SIZE));
    return ranges;
}

void
VGic::serialize(CheckpointOut &cp) const
{
    Tick interrupt_time[VGIC_CPU_MAX];
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        interrupt_time[cpu] = 0;
        if (postVIntEvent[cpu]->scheduled()) {
            interrupt_time[cpu] = postVIntEvent[cpu]->when();
        }
    }

    DPRINTF(Checkpoint, "Serializing VGIC\n");

    SERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    SERIALIZE_SCALAR(vcpuAddr);
    SERIALIZE_SCALAR(hvAddr);
    SERIALIZE_SCALAR(pioDelay);
    SERIALIZE_SCALAR(maintInt);

    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++)
        vcpuData[cpu].serializeSection(cp, csprintf("vcpuData%d", cpu));
}

void
VGic::vcpuIntData::serialize(CheckpointOut &cp) const
{
    uint32_t vctrl_val = vctrl;
    SERIALIZE_SCALAR(vctrl_val);
    uint32_t hcr_val = hcr;
    SERIALIZE_SCALAR(hcr_val);
    uint64_t eisr_val = eisr;
    SERIALIZE_SCALAR(eisr_val);
    uint8_t VMGrp0En_val = VMGrp0En;
    SERIALIZE_SCALAR(VMGrp0En_val);
    uint8_t VMGrp1En_val = VMGrp1En;
    SERIALIZE_SCALAR(VMGrp1En_val);
    uint8_t VMAckCtl_val = VMAckCtl;
    SERIALIZE_SCALAR(VMAckCtl_val);
    uint8_t VMFiqEn_val = VMFiqEn;
    SERIALIZE_SCALAR(VMFiqEn_val);
    uint8_t VMCBPR_val = VMCBPR;
    SERIALIZE_SCALAR(VMCBPR_val);
    uint8_t VEM_val = VEM;
    SERIALIZE_SCALAR(VEM_val);
    uint8_t VMABP_val = VMABP;
    SERIALIZE_SCALAR(VMABP_val);
    uint8_t VMBP_val = VMBP;
    SERIALIZE_SCALAR(VMBP_val);
    uint8_t VMPriMask_val = VMPriMask;
    SERIALIZE_SCALAR(VMPriMask_val);

    for (int i = 0; i < NUM_LR; i++) {
        ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
        paramOut(cp, "lr", LR[i]);
    }
}

void
VGic::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing VGIC\n");

    Tick interrupt_time[VGIC_CPU_MAX];
    UNSERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        if (interrupt_time[cpu])
            schedule(postVIntEvent[cpu], interrupt_time[cpu]);

        vcpuData[cpu].unserializeSection(cp, csprintf("vcpuData%d", cpu));
    }
    UNSERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_SCALAR(vcpuAddr);
    UNSERIALIZE_SCALAR(hvAddr);
    UNSERIALIZE_SCALAR(pioDelay);
    UNSERIALIZE_SCALAR(maintInt);
}

void
VGic::vcpuIntData::unserialize(CheckpointIn &cp)
{
    paramIn(cp, "vctrl_val", vctrl);
    paramIn(cp, "hcr_val", hcr);
    paramIn(cp, "eisr_val", eisr);
    paramIn(cp, "VMGrp0En_val", VMGrp0En);
    paramIn(cp, "VMGrp1En_val", VMGrp1En);
    paramIn(cp, "VMAckCtl_val", VMAckCtl);
    paramIn(cp, "VMFiqEn_val", VMFiqEn);
    paramIn(cp, "VMCBPR_val", VMCBPR);
    paramIn(cp, "VEM_val", VEM);
    paramIn(cp, "VMABP_val", VMABP);
    // Restore VMBP as well: serialize() writes VMBP_val, so skipping it here
    // would leave the virtual binary point unrestored after a checkpoint.
    paramIn(cp, "VMBP_val", VMBP);
    paramIn(cp, "VMPriMask_val", VMPriMask);

    for (int i = 0; i < NUM_LR; i++) {
        ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
        paramIn(cp, "lr", LR[i]);
    }
}

VGic *
VGicParams::create()
{
    return new VGic(this);
}