/*
 * Copyright (c) 2010, 2013, 2015-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Prakash Ramrakhyani
 */

#include "dev/arm/gic_v2.hh"

#include "base/trace.hh"
#include "debug/Checkpoint.hh"
#include "debug/GIC.hh"
#include "debug/IPI.hh"
#include "debug/Interrupt.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"

const AddrRange GicV2::GICD_IGROUPR   (0x080, 0x0ff);
const AddrRange GicV2::GICD_ISENABLER (0x100, 0x17f);
const AddrRange GicV2::GICD_ICENABLER (0x180, 0x1ff);
const AddrRange GicV2::GICD_ISPENDR   (0x200, 0x27f);
const AddrRange GicV2::GICD_ICPENDR   (0x280, 0x2ff);
const AddrRange GicV2::GICD_ISACTIVER (0x300, 0x37f);
const AddrRange GicV2::GICD_ICACTIVER (0x380, 0x3ff);
const AddrRange GicV2::GICD_IPRIORITYR(0x400, 0x7ff);
const AddrRange GicV2::GICD_ITARGETSR (0x800, 0xbff);
const AddrRange GicV2::GICD_ICFGR     (0xc00, 0xcff);

GicV2::GicV2(const Params *p)
    : BaseGic(p),
      gicdPIDR(p->gicd_pidr),
      gicdIIDR(p->gicd_iidr),
      giccIIDR(p->gicc_iidr),
      distRange(RangeSize(p->dist_addr, DIST_SIZE)),
      cpuRange(RangeSize(p->cpu_addr, p->cpu_size)),
      addrRanges{distRange, cpuRange},
      distPioDelay(p->dist_pio_delay),
      cpuPioDelay(p->cpu_pio_delay), intLatency(p->int_latency),
      enabled(false), haveGem5Extensions(p->gem5_extensions),
      itLines(p->it_lines),
      intEnabled {}, pendingInt {}, activeInt {},
      intPriority {}, cpuTarget {}, intConfig {},
      cpuSgiPending {}, cpuSgiActive {},
      cpuSgiPendingExt {}, cpuSgiActiveExt {},
      cpuPpiPending {}, cpuPpiActive {},
      pendingDelayedInterrupts(0)
{
    for (int x = 0; x < CPU_MAX; x++) {
        iccrpr[x] = 0xff;
        cpuControl[x] = 0;
        cpuPriority[x] = 0xff;
        cpuBpr[x] = GICC_BPR_MINIMUM;
        // Initialize cpu highest int
        cpuHighestInt[x] = SPURIOUS_INT;
        postIntEvent[x] =
            new EventFunctionWrapper([this, x]{ postDelayedInt(x); },
                                     "Post Interrupt to CPU");
        postFiqEvent[x] =
            new EventFunctionWrapper([this, x]{ postDelayedFiq(x); },
                                     "Post FIQ to CPU");
    }
    DPRINTF(Interrupt, "cpuEnabled[0]=%d cpuEnabled[1]=%d\n", cpuEnabled(0),
            cpuEnabled(1));

    gem5ExtensionsEnabled = false;
}

GicV2::~GicV2()
{
    for (int x = 0; x < CPU_MAX; x++) {
        delete postIntEvent[x];
        delete postFiqEvent[x];
    }
}

Tick
GicV2::read(PacketPtr pkt)
{
    const Addr addr = pkt->getAddr();

    if (distRange.contains(addr))
        return readDistributor(pkt);
    else if (cpuRange.contains(addr))
        return readCpu(pkt);
    else
        panic("Read to unknown address %#x\n", pkt->getAddr());
}

Tick
GicV2::write(PacketPtr pkt)
{
    const Addr addr = pkt->getAddr();

    if (distRange.contains(addr))
        return writeDistributor(pkt);
    else if (cpuRange.contains(addr))
        return writeCpu(pkt);
    else
        panic("Write to unknown address %#x\n", pkt->getAddr());
}

Tick
GicV2::readDistributor(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - distRange.start();
    const ContextID ctx = pkt->req->contextId();

    DPRINTF(GIC, "gic distributor read register %#x\n", daddr);

    const uint32_t resp = readDistributor(ctx, daddr, pkt->getSize());

    switch (pkt->getSize()) {
      case 1:
        pkt->setLE<uint8_t>(resp);
        break;
      case 2:
        pkt->setLE<uint16_t>(resp);
        break;
      case 4:
        pkt->setLE<uint32_t>(resp);
        break;
      default:
        panic("Invalid size while reading Distributor regs in GIC: %d\n",
              pkt->getSize());
    }

    pkt->makeAtomicResponse();
    return distPioDelay;
}
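
// Backend for the distributor reads above: decode the register at offset
// daddr for context ctx. resp_sz only matters for the byte-accessible
// register banks (GICD_IPRIORITYR and GICD_ITARGETSR), where sub-word
// accesses return packed 8-bit fields.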
uint32_t
GicV2::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
{
    if (GICD_IGROUPR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_IGROUPR.start()) >> 2;
        assert(ix < 32);
        return getIntGroup(ctx, ix);
    }

    if (GICD_ISENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
        assert(ix < 32);
        return getIntEnabled(ctx, ix);
    }

    if (GICD_ICENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
        assert(ix < 32);
        return getIntEnabled(ctx, ix);
    }

    if (GICD_ISPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
        assert(ix < 32);
        return getPendingInt(ctx, ix);
    }

    if (GICD_ICPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
        assert(ix < 32);
        return getPendingInt(ctx, ix);
    }

    if (GICD_ISACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
        assert(ix < 32);
        return getActiveInt(ctx, ix);
    }

    if (GICD_ICACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
        assert(ix < 32);
        return getActiveInt(ctx, ix);
    }

    if (GICD_IPRIORITYR.contains(daddr)) {
        Addr int_num = daddr - GICD_IPRIORITYR.start();
        assert(int_num < INT_LINES_MAX);
        DPRINTF(Interrupt, "Reading interrupt priority at int# %#x \n",
                int_num);

        switch (resp_sz) {
          default: // will panic() after return to caller anyway
          case 1:
            return getIntPriority(ctx, int_num);
          case 2:
            assert((int_num + 1) < INT_LINES_MAX);
            return (getIntPriority(ctx, int_num) |
                    getIntPriority(ctx, int_num+1) << 8);
          case 4:
            assert((int_num + 3) < INT_LINES_MAX);
            return (getIntPriority(ctx, int_num) |
                    getIntPriority(ctx, int_num+1) << 8 |
                    getIntPriority(ctx, int_num+2) << 16 |
                    getIntPriority(ctx, int_num+3) << 24);
        }
    }

    if (GICD_ITARGETSR.contains(daddr)) {
        Addr int_num = daddr - GICD_ITARGETSR.start();
        DPRINTF(GIC, "Reading processor target register for int# %#x \n",
                int_num);
        assert(int_num < INT_LINES_MAX);

        if (resp_sz == 1) {
            return getCpuTarget(ctx, int_num);
        } else {
            assert(resp_sz == 4);
            int_num = mbits(int_num, 31, 2);
            return (getCpuTarget(ctx, int_num) |
                    getCpuTarget(ctx, int_num+1) << 8 |
                    getCpuTarget(ctx, int_num+2) << 16 |
                    getCpuTarget(ctx, int_num+3) << 24);
        }
    }

    if (GICD_ICFGR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2;
        assert(ix < 64);
        /** @todo software generated interrupts and PPIs
         * can't be configured in some ways */
        return intConfig[ix];
    }

    switch(daddr) {
      case GICD_CTLR:
        return enabled;
      case GICD_TYPER:
        /* The 0x100 is a made-up flag to show that gem5 extensions
         * are available,
         * write 0x200 to this register to enable it. */
        return (((sys->numRunningContexts() - 1) << 5) |
                (itLines/INT_BITS_MAX - 1) |
                (haveGem5Extensions ?
                 0x100 : 0x0));
      case GICD_PIDR0:
        //ARM defined DevID
        return (gicdPIDR & 0xFF);
      case GICD_PIDR1:
        return ((gicdPIDR >> 8) & 0xFF);
      case GICD_PIDR2:
        return ((gicdPIDR >> 16) & 0xFF);
      case GICD_PIDR3:
        return ((gicdPIDR >> 24) & 0xFF);
      case GICD_IIDR:
        /* revision id is reported as 1 and variant as 0 */
        return gicdIIDR;
      default:
        panic("Tried to read Gic distributor at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::readCpu(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - cpuRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    assert(ctx < sys->numRunningContexts());

    DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr,
            ctx);

    pkt->setLE<uint32_t>(readCpu(ctx, daddr));

    pkt->makeAtomicResponse();
    return cpuPioDelay;
}

uint32_t
GicV2::readCpu(ContextID ctx, Addr daddr)
{
    switch(daddr) {
      case GICC_IIDR:
        return giccIIDR;
      case GICC_CTLR:
        return cpuControl[ctx];
      case GICC_PMR:
        return cpuPriority[ctx];
      case GICC_BPR:
        return cpuBpr[ctx];
      case GICC_IAR:
        if (enabled && cpuEnabled(ctx)) {
            int active_int = cpuHighestInt[ctx];
            IAR iar = 0;
            iar.ack_id = active_int;
            iar.cpu_id = 0;
            if (active_int < SGI_MAX) {
                // this is a software interrupt from another CPU
                if (!gem5ExtensionsEnabled) {
                    panic_if(!cpuSgiPending[active_int],
                             "Interrupt %d active but no CPU generated it?\n",
                             active_int);
                    for (int x = 0; x < sys->numRunningContexts(); x++) {
                        // See which CPU generated the interrupt
                        uint8_t cpugen =
                            bits(cpuSgiPending[active_int], 7 + 8 * x, 8 * x);
                        if (cpugen & (1 << ctx)) {
                            iar.cpu_id = x;
                            break;
                        }
                    }
                    uint64_t sgi_num = ULL(1) << (ctx + 8 * iar.cpu_id);
                    cpuSgiActive[iar.ack_id] |= sgi_num;
                    cpuSgiPending[iar.ack_id] &= ~sgi_num;
                } else {
                    uint64_t sgi_num = ULL(1) << iar.ack_id;
                    cpuSgiActiveExt[ctx] |= sgi_num;
                    cpuSgiPendingExt[ctx] &= ~sgi_num;
                }
            } else if (active_int < (SGI_MAX + PPI_MAX) ) {
                uint32_t int_num = 1 << (cpuHighestInt[ctx] - SGI_MAX);
                cpuPpiActive[ctx] |= int_num;
                updateRunPri();
                cpuPpiPending[ctx] &= ~int_num;

            } else {
                uint32_t int_num = 1 << intNumToBit(cpuHighestInt[ctx]);
                getActiveInt(ctx, intNumToWord(cpuHighestInt[ctx])) |= int_num;
                updateRunPri();
                if (!isLevelSensitive(ctx, active_int)) {
                    getPendingInt(ctx, intNumToWord(cpuHighestInt[ctx]))
                        &= ~int_num;
                }
            }

            DPRINTF(Interrupt,
                    "CPU %d reading IAR.id=%d IAR.cpu=%d, iar=0x%x\n",
                    ctx, iar.ack_id, iar.cpu_id, iar);
            cpuHighestInt[ctx] = SPURIOUS_INT;
            updateIntState(-1);
            clearInt(ctx, active_int);
            return iar;
        } else {
            return SPURIOUS_INT;
        }

        break;
      case GICC_RPR:
        return iccrpr[0];
      case GICC_HPPIR:
        panic("Need to implement HPPIR");
        break;
      default:
        panic("Tried to read Gic cpu at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::writeDistributor(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - distRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    const size_t data_sz = pkt->getSize();

    uint32_t pkt_data M5_VAR_USED;
    switch (data_sz)
    {
      case 1:
        pkt_data = pkt->getLE<uint8_t>();
        break;
      case 2:
        pkt_data = pkt->getLE<uint16_t>();
        break;
      case 4:
        pkt_data = pkt->getLE<uint32_t>();
        break;
      default:
        panic("Invalid size when writing to priority regs in Gic: %d\n",
              data_sz);
    }

    DPRINTF(GIC, "gic distributor write register %#x size %#x value %#x \n",
            daddr, data_sz, pkt_data);

    writeDistributor(ctx, daddr, pkt_data, data_sz);

    pkt->makeAtomicResponse();
    return distPioDelay;
}

void
GicV2::writeDistributor(ContextID ctx, Addr daddr, uint32_t data,
                        size_t data_sz)
{
    if (GICD_IGROUPR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_IGROUPR.start()) >> 2;
        assert(ix < 32);
        getIntGroup(ctx, ix) |= data;
        return;
    }

    if (GICD_ISENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
        assert(ix < 32);
        getIntEnabled(ctx, ix) |= data;
        return;
    }

    if (GICD_ICENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
        assert(ix < 32);
        getIntEnabled(ctx, ix) &= ~data;
        return;
    }

    if (GICD_ISPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
        auto mask = data;
        if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
        getPendingInt(ctx, ix) |= mask;
        updateIntState(ix);
        return;
    }

    if (GICD_ICPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
        auto mask = data;
        if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
        getPendingInt(ctx, ix) &= ~mask;
        updateIntState(ix);
        return;
    }

    if (GICD_ISACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
        getActiveInt(ctx, ix) |= data;
        return;
    }

    if (GICD_ICACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
        getActiveInt(ctx, ix) &= ~data;
        return;
    }

    if (GICD_IPRIORITYR.contains(daddr)) {
        Addr int_num = daddr - GICD_IPRIORITYR.start();
        switch(data_sz) {
          case 1:
            getIntPriority(ctx, int_num) = data;
            break;
          case 2: {
            getIntPriority(ctx, int_num) = bits(data, 7, 0);
            getIntPriority(ctx, int_num + 1) = bits(data, 15, 8);
            break;
          }
          case 4: {
            getIntPriority(ctx, int_num) = bits(data, 7, 0);
            getIntPriority(ctx, int_num + 1) = bits(data, 15, 8);
            getIntPriority(ctx, int_num + 2) = bits(data, 23, 16);
            getIntPriority(ctx, int_num + 3) = bits(data, 31, 24);
            break;
          }
          default:
            panic("Invalid size when writing to priority regs in Gic: %d\n",
                  data_sz);
        }

        updateIntState(-1);
        updateRunPri();
        return;
    }

    if (GICD_ITARGETSR.contains(daddr)) {
        Addr int_num = daddr - GICD_ITARGETSR.start();
        // Interrupts 0-31 are read only
        unsigned offset = SGI_MAX + PPI_MAX;
        if (int_num >= offset) {
            unsigned ix = int_num - offset; // index into cpuTarget array
            if (data_sz == 1) {
                cpuTarget[ix] = data & 0xff;
            } else {
                assert (data_sz == 4);
                cpuTarget[ix] = bits(data, 7, 0);
                cpuTarget[ix+1] = bits(data, 15, 8);
                cpuTarget[ix+2] = bits(data, 23, 16);
                cpuTarget[ix+3] = bits(data, 31, 24);
            }
            updateIntState(int_num >> 2);
        }
        return;
    }

    if (GICD_ICFGR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2;
        assert(ix < INT_BITS_MAX*2);
        intConfig[ix] = data;
        if (data & NN_CONFIG_MASK)
            warn("GIC N:N mode selected and not supported at this time\n");
        return;
    }

    switch(daddr) {
      case GICD_CTLR:
        enabled = data;
        DPRINTF(Interrupt, "Distributor enable flag set to = %d\n", enabled);
        break;
      case GICD_TYPER:
        /* 0x200 is a made-up flag to enable gem5 extension functionality.
         * This reg is not normally written.
         */
        gem5ExtensionsEnabled = (data & 0x200) && haveGem5Extensions;
        DPRINTF(GIC, "gem5 extensions %s\n",
                gem5ExtensionsEnabled ? "enabled" : "disabled");
        break;
      case GICD_SGIR:
        softInt(ctx, data);
        break;
      default:
        panic("Tried to write Gic distributor at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::writeCpu(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - cpuRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    const uint32_t data = pkt->getLE<uint32_t>();

    DPRINTF(GIC, "gic cpu write register cpu:%d %#x val: %#x\n",
            ctx, daddr, data);

    writeCpu(ctx, daddr, data);

    pkt->makeAtomicResponse();
    return cpuPioDelay;
}

void
GicV2::writeCpu(ContextID ctx, Addr daddr, uint32_t data)
{
    switch(daddr) {
      case GICC_CTLR:
        cpuControl[ctx] = data;
        break;
      case GICC_PMR:
        cpuPriority[ctx] = data;
        break;
      case GICC_BPR: {
        auto bpr = data & 0x7;
        if (bpr < GICC_BPR_MINIMUM)
            bpr = GICC_BPR_MINIMUM;
        cpuBpr[ctx] = bpr;
        break;
      }
      case GICC_EOIR: {
        const IAR iar = data;
        if (iar.ack_id < SGI_MAX) {
            // Clear out the bit that corresponds to the cleared int
            uint64_t clr_int = ULL(1) << (ctx + 8 * iar.cpu_id);
            if (!(cpuSgiActive[iar.ack_id] & clr_int) &&
                !(cpuSgiActiveExt[ctx] & (1 << iar.ack_id)))
                panic("Done handling a SGI that isn't active?\n");
            if (gem5ExtensionsEnabled)
                cpuSgiActiveExt[ctx] &= ~(1 << iar.ack_id);
            else
                cpuSgiActive[iar.ack_id] &= ~clr_int;
        } else if (iar.ack_id < (SGI_MAX + PPI_MAX) ) {
            uint32_t int_num = 1 << (iar.ack_id - SGI_MAX);
            if (!(cpuPpiActive[ctx] & int_num))
                warn("CPU %d Done handling a PPI interrupt "
                     "that isn't active?\n", ctx);
            cpuPpiActive[ctx] &= ~int_num;
        } else {
            uint32_t int_num = 1 << intNumToBit(iar.ack_id);
            if (!(getActiveInt(ctx, intNumToWord(iar.ack_id)) & int_num))
                warn("Done handling interrupt that isn't active: %d\n",
                     intNumToBit(iar.ack_id));
            getActiveInt(ctx, intNumToWord(iar.ack_id)) &= ~int_num;
        }
        updateRunPri();
        DPRINTF(Interrupt, "CPU %d done handling intr IAR = %d from cpu %d\n",
                ctx, iar.ack_id, iar.cpu_id);
        break;
      }
      case GICC_APR0:
      case GICC_APR1:
      case GICC_APR2:
      case GICC_APR3:
        warn("GIC APRn write ignored because not implemented: %#x\n", daddr);
        break;
      case GICC_DIR:
        warn("GIC DIR write ignored because not implemented: %#x\n", daddr);
        break;
      default:
        panic("Tried to write Gic cpu at offset %#x\n", daddr);
        break;
    }
    if (cpuEnabled(ctx)) updateIntState(-1);
}

GicV2::BankedRegs&
GicV2::getBankedRegs(ContextID ctx) {
    if (bankedRegs.size() <= ctx)
        bankedRegs.resize(ctx + 1);

    if (!bankedRegs[ctx])
        bankedRegs[ctx] = new BankedRegs;
    return *bankedRegs[ctx];
}

void
GicV2::softInt(ContextID ctx, SWI swi)
{
    if (gem5ExtensionsEnabled) {
        switch (swi.list_type) {
          case 0: {
            // interrupt cpus specified
            int dest = swi.cpu_list;
            DPRINTF(IPI, "Generating softIRQ from CPU %d for CPU %d\n",
                    ctx, dest);
            if (cpuEnabled(dest)) {
                cpuSgiPendingExt[dest] |= (1 << swi.sgi_id);
                DPRINTF(IPI, "SGI[%d]=%#x\n", dest,
                        cpuSgiPendingExt[dest]);
            }
          } break;
          case 1: {
            // interrupt all
            for (int i = 0; i < sys->numContexts(); i++) {
                DPRINTF(IPI, "Processing CPU %d\n", i);
                if (!cpuEnabled(i))
                    continue;
                cpuSgiPendingExt[i] |= 1 << swi.sgi_id;
                DPRINTF(IPI, "SGI[%d]=%#x\n", swi.sgi_id,
                        cpuSgiPendingExt[i]);
            }
          } break;
          case 2: {
            // Interrupt requesting cpu only
            DPRINTF(IPI, "Generating softIRQ from CPU %d for CPU %d\n",
                    ctx, ctx);
            if (cpuEnabled(ctx)) {
                cpuSgiPendingExt[ctx] |= (1 << swi.sgi_id);
                DPRINTF(IPI, "SGI[%d]=%#x\n", ctx,
                        cpuSgiPendingExt[ctx]);
            }
          } break;
        }
    } else {
        switch (swi.list_type) {
          case 1:
            // interrupt all
            uint8_t cpu_list;
            cpu_list = 0;
            for (int x = 0; x < sys->numContexts(); x++)
                cpu_list |= cpuEnabled(x) ? 1 << x : 0;
            swi.cpu_list = cpu_list;
            break;
          case 2:
            // interrupt requesting cpu only
            swi.cpu_list = 1 << ctx;
            break;
            // else interrupt cpus specified
        }

        DPRINTF(IPI, "Generating softIRQ from CPU %d for %#x\n", ctx,
                swi.cpu_list);
        for (int i = 0; i < sys->numContexts(); i++) {
            DPRINTF(IPI, "Processing CPU %d\n", i);
            if (!cpuEnabled(i))
                continue;
            if (swi.cpu_list & (1 << i))
                cpuSgiPending[swi.sgi_id] |= (1 << i) << (8 * ctx);
            DPRINTF(IPI, "SGI[%d]=%#x\n", swi.sgi_id,
                    cpuSgiPending[swi.sgi_id]);
        }
    }
    updateIntState(-1);
}

uint64_t
GicV2::genSwiMask(int cpu)
{
    if (cpu > sys->numContexts())
        panic("Invalid CPU ID\n");
    return ULL(0x0101010101010101) << cpu;
}

uint8_t
GicV2::getCpuPriority(unsigned cpu)
{
    // see Table 3-2 in IHI0048B.b (GICv2)
    // mask some low-order priority bits per BPR value
    // NB: the GIC prioritization scheme is upside down:
    // lower values are higher priority; masking off bits
    // actually creates a higher priority, not lower.
    return cpuPriority[cpu] & (0xff00 >> (7 - cpuBpr[cpu]));
}

void
GicV2::updateIntState(int hint)
{
    for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
        if (!cpuEnabled(cpu))
            continue;

        /*@todo use hint to do less work. */
        int highest_int = SPURIOUS_INT;
        // Priorities below that set in GICC_PMR can be ignored
        uint8_t highest_pri = getCpuPriority(cpu);

        // Check SGIs
        for (int swi = 0; swi < SGI_MAX; swi++) {
            if (!cpuSgiPending[swi] && !cpuSgiPendingExt[cpu])
                continue;
            if ((cpuSgiPending[swi] & genSwiMask(cpu)) ||
                (cpuSgiPendingExt[cpu] & (1 << swi)))
                if (highest_pri > getIntPriority(cpu, swi)) {
                    highest_pri = getIntPriority(cpu, swi);
                    highest_int = swi;
                }
        }

        // Check PPIs
        if (cpuPpiPending[cpu]) {
            for (int ppi_idx = 0, int_num = SGI_MAX;
                 int_num < PPI_MAX + SGI_MAX;
                 ppi_idx++, int_num++) {

                const bool ppi_pending = bits(cpuPpiPending[cpu], ppi_idx);
                const bool ppi_enabled = bits(getIntEnabled(cpu, 0), int_num);
                const bool higher_priority =
                    highest_pri > getIntPriority(cpu, int_num);

                if (ppi_pending && ppi_enabled && higher_priority) {
                    highest_pri = getIntPriority(cpu, int_num);
                    highest_int = int_num;
                }
            }
        }

        bool mp_sys = sys->numRunningContexts() > 1;
        // Check other ints
        for (int x = 0; x < (itLines/INT_BITS_MAX); x++) {
            if (getIntEnabled(cpu, x) & getPendingInt(cpu, x)) {
                for (int y = 0; y < INT_BITS_MAX; y++) {
                    uint32_t int_nm = x * INT_BITS_MAX + y;
                    DPRINTF(GIC, "Checking for interrupt# %d \n", int_nm);
                    /* Set current pending int as highest int for current cpu
                       if the interrupt's priority is higher than the current
                       priority and if the current cpu is the target (for mp
                       configs only)
                     */
                    if ((bits(getIntEnabled(cpu, x), y)
                         & bits(getPendingInt(cpu, x), y)) &&
                        (getIntPriority(cpu, int_nm) < highest_pri))
                        if ((!mp_sys) ||
                            (gem5ExtensionsEnabled
                             ? (getCpuTarget(cpu, int_nm) == cpu)
                             : (getCpuTarget(cpu, int_nm) & (1 << cpu)))) {
                            highest_pri = getIntPriority(cpu, int_nm);
                            highest_int = int_nm;
                        }
                }
            }
        }

        uint32_t prev_highest = cpuHighestInt[cpu];
        cpuHighestInt[cpu] = highest_int;

        if (highest_int == SPURIOUS_INT) {
            if (isLevelSensitive(cpu, prev_highest)) {

                DPRINTF(Interrupt, "Clear IRQ for cpu%d\n", cpu);
                clearInt(cpu, prev_highest);
            }
            continue;
        }

        /* @todo make this work for more than one cpu, need to handle 1:N, N:N
         * models */
        if (enabled && cpuEnabled(cpu) &&
            (highest_pri < getCpuPriority(cpu)) &&
            !(getActiveInt(cpu, intNumToWord(highest_int))
              & (1 << intNumToBit(highest_int)))) {

            DPRINTF(Interrupt, "Posting interrupt %d to cpu%d\n", highest_int,
                    cpu);

            if (isFiq(cpu, highest_int)) {
                postFiq(cpu, curTick() + intLatency);
            } else {
                postInt(cpu, curTick() + intLatency);
            }
        }
    }
}

void
GicV2::updateRunPri()
{
    for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
        if (!cpuEnabled(cpu))
            continue;
        uint8_t maxPriority = 0xff;
        for (int i = 0; i < itLines; i++) {
            if (i < SGI_MAX) {
                if (((cpuSgiActive[i] & genSwiMask(cpu)) ||
                     (cpuSgiActiveExt[cpu] & (1 << i))) &&
                    (getIntPriority(cpu, i) < maxPriority))
                    maxPriority = getIntPriority(cpu, i);
            } else if (i < (SGI_MAX + PPI_MAX)) {
                if ((cpuPpiActive[cpu] & ( 1 << (i - SGI_MAX))) &&
                    (getIntPriority(cpu, i) < maxPriority))
                    maxPriority = getIntPriority(cpu, i);

            } else {
                if (getActiveInt(cpu, intNumToWord(i))
                    & (1 << intNumToBit(i)))
                    if (getIntPriority(cpu, i) < maxPriority)
                        maxPriority = getIntPriority(cpu, i);
            }
        }
        iccrpr[cpu] = maxPriority;
    }
}
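
// sendInt() posts a shared peripheral interrupt (SPI, INTID >= 32): it marks
// the line pending in the distributor and lets updateIntState() pick the
// CPU(s) to signal based on the line's ITARGETSR value and priority.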
void
GicV2::sendInt(uint32_t num)
{
    uint8_t target = getCpuTarget(0, num);
    DPRINTF(Interrupt, "Received Interrupt number %d, cpuTarget %#x: \n",
            num, target);
    if ((target & (target - 1)) && !gem5ExtensionsEnabled)
        panic("Multiple targets for peripheral interrupts is not supported\n");
    panic_if(num < SGI_MAX + PPI_MAX,
             "sendInt() must only be used for interrupts 32 and higher");
    getPendingInt(target, intNumToWord(num)) |= 1 << intNumToBit(num);
    updateIntState(intNumToWord(num));
}

void
GicV2::sendPPInt(uint32_t num, uint32_t cpu)
{
    DPRINTF(Interrupt, "Received PPI %d, cpuTarget %#x: \n",
            num, cpu);
    cpuPpiPending[cpu] |= 1 << (num - SGI_MAX);
    updateIntState(intNumToWord(num));
}

void
GicV2::clearInt(uint32_t num)
{
    if (isLevelSensitive(0, num)) {
        uint8_t target = getCpuTarget(0, num);

        DPRINTF(Interrupt,
                "Received Clear interrupt number %d, cpuTarget %#x:\n",
                num, target);

        getPendingInt(target, intNumToWord(num)) &= ~(1 << intNumToBit(num));
        updateIntState(intNumToWord(num));
    } else {
        /* Nothing to do:
         * Edge-triggered interrupts remain pending until software
         * writes GICD_ICPENDR or reads GICC_IAR */
    }
}

void
GicV2::clearPPInt(uint32_t num, uint32_t cpu)
{
    DPRINTF(Interrupt, "Clearing PPI %d, cpuTarget %#x: \n",
            num, cpu);
    cpuPpiPending[cpu] &= ~(1 << (num - SGI_MAX));
    updateIntState(intNumToWord(num));
}

void
GicV2::clearInt(ContextID ctx, uint32_t int_num)
{
    if (isFiq(ctx, int_num)) {
        platform->intrctrl->clear(ctx, ArmISA::INT_FIQ, 0);
    } else {
        platform->intrctrl->clear(ctx, ArmISA::INT_IRQ, 0);
    }
}

void
GicV2::postInt(uint32_t cpu, Tick when)
{
    if (!(postIntEvent[cpu]->scheduled())) {
        ++pendingDelayedInterrupts;
        eventq->schedule(postIntEvent[cpu], when);
    }
}

void
GicV2::postDelayedInt(uint32_t cpu)
{
    platform->intrctrl->post(cpu, ArmISA::INT_IRQ, 0);
    --pendingDelayedInterrupts;
    assert(pendingDelayedInterrupts >= 0);
    if (pendingDelayedInterrupts == 0)
        signalDrainDone();
}

void
GicV2::postFiq(uint32_t cpu, Tick when)
{
    if (!(postFiqEvent[cpu]->scheduled())) {
        ++pendingDelayedInterrupts;
        eventq->schedule(postFiqEvent[cpu], when);
    }
}

void
GicV2::postDelayedFiq(uint32_t cpu)
{
    platform->intrctrl->post(cpu, ArmISA::INT_FIQ, 0);
    --pendingDelayedInterrupts;
    assert(pendingDelayedInterrupts >= 0);
    if (pendingDelayedInterrupts == 0)
        signalDrainDone();
}

DrainState
GicV2::drain()
{
    if (pendingDelayedInterrupts == 0) {
        return DrainState::Drained;
    } else {
        return DrainState::Draining;
    }
}

void
GicV2::drainResume()
{
    // There may be pending interrupts if checkpointed from Kvm; post them.
    updateIntState(-1);
}

void
GicV2::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm GIC\n");

    SERIALIZE_SCALAR(enabled);
    SERIALIZE_SCALAR(itLines);
    SERIALIZE_ARRAY(intEnabled, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(pendingInt, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(activeInt, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(intGroup, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(iccrpr, CPU_MAX);
    SERIALIZE_ARRAY(intPriority, GLOBAL_INT_LINES);
    SERIALIZE_ARRAY(cpuTarget, GLOBAL_INT_LINES);
    SERIALIZE_ARRAY(intConfig, INT_BITS_MAX * 2);
    SERIALIZE_ARRAY(cpuControl, CPU_MAX);
    SERIALIZE_ARRAY(cpuPriority, CPU_MAX);
    SERIALIZE_ARRAY(cpuBpr, CPU_MAX);
    SERIALIZE_ARRAY(cpuHighestInt, CPU_MAX);
    SERIALIZE_ARRAY(cpuSgiActive, SGI_MAX);
    SERIALIZE_ARRAY(cpuSgiPending, SGI_MAX);
    SERIALIZE_ARRAY(cpuSgiActiveExt, CPU_MAX);
    SERIALIZE_ARRAY(cpuSgiPendingExt, CPU_MAX);
    SERIALIZE_ARRAY(cpuPpiActive, CPU_MAX);
    SERIALIZE_ARRAY(cpuPpiPending, CPU_MAX);
    SERIALIZE_SCALAR(gem5ExtensionsEnabled);

    for (uint32_t i=0; i < bankedRegs.size(); ++i) {
        if (!bankedRegs[i])
            continue;
        bankedRegs[i]->serializeSection(cp, csprintf("bankedRegs%i", i));
    }
}

void
GicV2::BankedRegs::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(intEnabled);
    SERIALIZE_SCALAR(pendingInt);
    SERIALIZE_SCALAR(activeInt);
    SERIALIZE_SCALAR(intGroup);
    SERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
}

void
GicV2::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm GIC\n");

    UNSERIALIZE_SCALAR(enabled);
    UNSERIALIZE_SCALAR(itLines);
    UNSERIALIZE_ARRAY(intEnabled, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(pendingInt, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(activeInt, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(intGroup, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(iccrpr, CPU_MAX);
    UNSERIALIZE_ARRAY(intPriority, GLOBAL_INT_LINES);
    UNSERIALIZE_ARRAY(cpuTarget, GLOBAL_INT_LINES);
    UNSERIALIZE_ARRAY(intConfig, INT_BITS_MAX * 2);
    UNSERIALIZE_ARRAY(cpuControl, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPriority, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuBpr, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuHighestInt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuSgiActive, SGI_MAX);
    UNSERIALIZE_ARRAY(cpuSgiPending, SGI_MAX);
    UNSERIALIZE_ARRAY(cpuSgiActiveExt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuSgiPendingExt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPpiActive, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPpiPending, CPU_MAX);

    // Handle checkpoints from before we drained the GIC to prevent
    // in-flight interrupts.
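    // Such checkpoints may carry a per-CPU "interrupt_time" array of ticks
    // at which delayed posts were scheduled; if present, re-schedule the
    // post events at those times.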
    if (cp.entryExists(Serializable::currentSection(), "interrupt_time")) {
        Tick interrupt_time[CPU_MAX];
        UNSERIALIZE_ARRAY(interrupt_time, CPU_MAX);

        for (uint32_t cpu = 0; cpu < CPU_MAX; cpu++) {
            if (interrupt_time[cpu])
                schedule(postIntEvent[cpu], interrupt_time[cpu]);
        }
    }

    if (!UNSERIALIZE_OPT_SCALAR(gem5ExtensionsEnabled))
        gem5ExtensionsEnabled = false;

    for (uint32_t i=0; i < CPU_MAX; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("bankedRegs%i", i));
        if (cp.sectionExists(Serializable::currentSection())) {
            getBankedRegs(i).unserialize(cp);
        }
    }
}

void
GicV2::BankedRegs::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(intEnabled);
    UNSERIALIZE_SCALAR(pendingInt);
    UNSERIALIZE_SCALAR(activeInt);
    UNSERIALIZE_SCALAR(intGroup);
    UNSERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
}

GicV2 *
GicV2Params::create()
{
    return new GicV2(this);
}