/*
 * Copyright (c) 2010, 2013, 2015-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Prakash Ramrakhyani
 */

#include "dev/arm/gic_v2.hh"

#include "base/trace.hh"
#include "debug/Checkpoint.hh"
#include "debug/GIC.hh"
#include "debug/IPI.hh"
#include "debug/Interrupt.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"

const AddrRange GicV2::GICD_IGROUPR   (0x080, 0x0ff);
const AddrRange GicV2::GICD_ISENABLER (0x100, 0x17f);
const AddrRange GicV2::GICD_ICENABLER (0x180, 0x1ff);
const AddrRange GicV2::GICD_ISPENDR   (0x200, 0x27f);
const AddrRange GicV2::GICD_ICPENDR   (0x280, 0x2ff);
const AddrRange GicV2::GICD_ISACTIVER (0x300, 0x37f);
const AddrRange GicV2::GICD_ICACTIVER (0x380, 0x3ff);
const AddrRange GicV2::GICD_IPRIORITYR(0x400, 0x7ff);
const AddrRange GicV2::GICD_ITARGETSR (0x800, 0xbff);
const AddrRange GicV2::GICD_ICFGR     (0xc00, 0xcff);

GicV2::GicV2(const Params *p)
    : BaseGic(p),
      distRange(RangeSize(p->dist_addr, DIST_SIZE)),
      cpuRange(RangeSize(p->cpu_addr, p->cpu_size)),
      addrRanges{distRange, cpuRange},
      distPioDelay(p->dist_pio_delay),
      cpuPioDelay(p->cpu_pio_delay), intLatency(p->int_latency),
      enabled(false), haveGem5Extensions(p->gem5_extensions),
      itLines(p->it_lines),
      intEnabled {}, pendingInt {}, activeInt {},
      intPriority {}, cpuTarget {}, intConfig {},
      cpuSgiPending {}, cpuSgiActive {},
      cpuSgiPendingExt {}, cpuSgiActiveExt {},
      cpuPpiPending {}, cpuPpiActive {},
      pendingDelayedInterrupts(0)
{
    for (int x = 0; x < CPU_MAX; x++) {
        iccrpr[x] = 0xff;
        cpuControl[x] = 0;
        cpuPriority[x] = 0xff;
        cpuBpr[x] = GICC_BPR_MINIMUM;
        // Initialize cpu highest int
        cpuHighestInt[x] = SPURIOUS_INT;
        postIntEvent[x] =
            new EventFunctionWrapper([this, x]{ postDelayedInt(x); },
                                     "Post Interrupt to CPU");
        postFiqEvent[x] =
            new EventFunctionWrapper([this, x]{ postDelayedFiq(x); },
                                     "Post FIQ to CPU");
    }
    DPRINTF(Interrupt, "cpuEnabled[0]=%d cpuEnabled[1]=%d\n", cpuEnabled(0),
            cpuEnabled(1));

    gem5ExtensionsEnabled = false;
}

GicV2::~GicV2()
{
    for (int x = 0; x < CPU_MAX; x++) {
        delete postIntEvent[x];
        delete postFiqEvent[x];
    }
}

Tick
GicV2::read(PacketPtr pkt)
{
    const Addr addr = pkt->getAddr();

    if (distRange.contains(addr))
        return readDistributor(pkt);
    else if (cpuRange.contains(addr))
        return readCpu(pkt);
    else
        panic("Read to unknown address %#x\n", pkt->getAddr());
}


Tick
GicV2::write(PacketPtr pkt)
{
    const Addr addr = pkt->getAddr();

    if (distRange.contains(addr))
        return writeDistributor(pkt);
    else if (cpuRange.contains(addr))
        return writeCpu(pkt);
    else
        panic("Write to unknown address %#x\n", pkt->getAddr());
}

Tick
GicV2::readDistributor(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - distRange.start();
    const ContextID ctx = pkt->req->contextId();

    DPRINTF(GIC, "gic distributor read register %#x\n", daddr);

    const uint32_t resp = readDistributor(ctx, daddr, pkt->getSize());

    switch (pkt->getSize()) {
      case 1:
        pkt->setLE<uint8_t>(resp);
        break;
      case 2:
        pkt->setLE<uint16_t>(resp);
        break;
      case 4:
        pkt->setLE<uint32_t>(resp);
        break;
      default:
        panic("Invalid size while reading Distributor regs in GIC: %d\n",
              pkt->getSize());
    }

    pkt->makeAtomicResponse();
    return distPioDelay;
}

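// Backend for distributor reads: daddr is the offset into the distributor
// register map, ctx selects the banked SGI/PPI state, and resp_sz is the
// access size used for the byte-indexed register files (GICD_IPRIORITYR,
// GICD_ITARGETSR).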
uint32_t
GicV2::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
{
    if (GICD_IGROUPR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_IGROUPR.start()) >> 2;
        assert(ix < 32);
        return getIntGroup(ctx, ix);
    }

    if (GICD_ISENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
        assert(ix < 32);
        return getIntEnabled(ctx, ix);
    }

    if (GICD_ICENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
        assert(ix < 32);
        return getIntEnabled(ctx, ix);
    }

    if (GICD_ISPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
        assert(ix < 32);
        return getPendingInt(ctx, ix);
    }

    if (GICD_ICPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
        assert(ix < 32);
        return getPendingInt(ctx, ix);
    }

    if (GICD_ISACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
        assert(ix < 32);
        return getActiveInt(ctx, ix);
    }

    if (GICD_ICACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
        assert(ix < 32);
        return getActiveInt(ctx, ix);
    }

    if (GICD_IPRIORITYR.contains(daddr)) {
        Addr int_num = daddr - GICD_IPRIORITYR.start();
        assert(int_num < INT_LINES_MAX);
        DPRINTF(Interrupt, "Reading interrupt priority at int# %#x\n",
                int_num);

        switch (resp_sz) {
          default: // will panic() after return to caller anyway
          case 1:
            return getIntPriority(ctx, int_num);
          case 2:
            assert((int_num + 1) < INT_LINES_MAX);
            return (getIntPriority(ctx, int_num) |
                    getIntPriority(ctx, int_num+1) << 8);
          case 4:
            assert((int_num + 3) < INT_LINES_MAX);
            return (getIntPriority(ctx, int_num) |
                    getIntPriority(ctx, int_num+1) << 8 |
                    getIntPriority(ctx, int_num+2) << 16 |
                    getIntPriority(ctx, int_num+3) << 24);
        }
    }

    if (GICD_ITARGETSR.contains(daddr)) {
        Addr int_num = daddr - GICD_ITARGETSR.start();
        DPRINTF(GIC, "Reading processor target register for int# %#x\n",
                int_num);
        assert(int_num < INT_LINES_MAX);

        if (resp_sz == 1) {
            return getCpuTarget(ctx, int_num);
        } else {
            assert(resp_sz == 4);
            int_num = mbits(int_num, 31, 2);
            return (getCpuTarget(ctx, int_num) |
                    getCpuTarget(ctx, int_num+1) << 8 |
                    getCpuTarget(ctx, int_num+2) << 16 |
                    getCpuTarget(ctx, int_num+3) << 24);
        }
    }

    if (GICD_ICFGR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2;
        assert(ix < 64);
        /** @todo software generated interrupts and PPIs
         * can't be configured in some ways */
        return intConfig[ix];
    }

    switch(daddr) {
      case GICD_CTLR:
        return enabled;
      case GICD_TYPER:
        /* The 0x100 is a made-up flag to show that gem5 extensions
         * are available; write 0x200 to this register to enable them. */
        return (((sys->numRunningContexts() - 1) << 5) |
                (itLines/INT_BITS_MAX - 1) |
                (haveGem5Extensions ? 0x100 : 0x0));
      case GICD_PIDR0:
        // ARM defined DevID
        return (GICD_400_PIDR_VALUE & 0xFF);
      case GICD_PIDR1:
        return ((GICD_400_PIDR_VALUE >> 8) & 0xFF);
      case GICD_PIDR2:
        return ((GICD_400_PIDR_VALUE >> 16) & 0xFF);
      case GICD_PIDR3:
        return ((GICD_400_PIDR_VALUE >> 24) & 0xFF);
      case GICD_IIDR:
        /* revision id is reported as 1 and variant as 0 */
        return GICD_400_IIDR_VALUE;
      default:
        panic("Tried to read Gic distributor at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::readCpu(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - cpuRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    assert(ctx < sys->numRunningContexts());

    DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr,
            ctx);

    pkt->setLE<uint32_t>(readCpu(ctx, daddr));

    pkt->makeAtomicResponse();
    return cpuPioDelay;
}

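// Backend for CPU interface reads. A read of GICC_IAR acknowledges the
// highest-priority pending interrupt for this context: the interrupt is
// marked active, its pending state is cleared (unless it is
// level-sensitive), and the interrupt/source ID is returned.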
uint32_t
GicV2::readCpu(ContextID ctx, Addr daddr)
{
    switch(daddr) {
      case GICC_IIDR:
        return GICC_400_IIDR_VALUE;
      case GICC_CTLR:
        return cpuControl[ctx];
      case GICC_PMR:
        return cpuPriority[ctx];
      case GICC_BPR:
        return cpuBpr[ctx];
      case GICC_IAR:
        if (enabled && cpuEnabled(ctx)) {
            int active_int = cpuHighestInt[ctx];
            IAR iar = 0;
            iar.ack_id = active_int;
            iar.cpu_id = 0;
            if (active_int < SGI_MAX) {
                // this is a software interrupt from another CPU
                if (!gem5ExtensionsEnabled) {
                    panic_if(!cpuSgiPending[active_int],
                             "Interrupt %d active but no CPU generated it?\n",
                             active_int);
                    for (int x = 0; x < sys->numRunningContexts(); x++) {
                        // See which CPU generated the interrupt
                        uint8_t cpugen =
                            bits(cpuSgiPending[active_int], 7 + 8 * x, 8 * x);
                        if (cpugen & (1 << ctx)) {
                            iar.cpu_id = x;
                            break;
                        }
                    }
                    uint64_t sgi_num = ULL(1) << (ctx + 8 * iar.cpu_id);
                    cpuSgiActive[iar.ack_id] |= sgi_num;
                    cpuSgiPending[iar.ack_id] &= ~sgi_num;
                } else {
                    uint64_t sgi_num = ULL(1) << iar.ack_id;
                    cpuSgiActiveExt[ctx] |= sgi_num;
                    cpuSgiPendingExt[ctx] &= ~sgi_num;
                }
            } else if (active_int < (SGI_MAX + PPI_MAX)) {
                uint32_t int_num = 1 << (cpuHighestInt[ctx] - SGI_MAX);
                cpuPpiActive[ctx] |= int_num;
                updateRunPri();
                cpuPpiPending[ctx] &= ~int_num;

            } else {
                uint32_t int_num = 1 << intNumToBit(cpuHighestInt[ctx]);
                getActiveInt(ctx, intNumToWord(cpuHighestInt[ctx])) |= int_num;
                updateRunPri();
                if (!isLevelSensitive(ctx, active_int)) {
                    getPendingInt(ctx, intNumToWord(cpuHighestInt[ctx]))
                        &= ~int_num;
                }
            }

            DPRINTF(Interrupt,
                    "CPU %d reading IAR.id=%d IAR.cpu=%d, iar=0x%x\n",
                    ctx, iar.ack_id, iar.cpu_id, iar);
            cpuHighestInt[ctx] = SPURIOUS_INT;
            updateIntState(-1);
            clearInt(ctx, active_int);
            return iar;
        } else {
            return SPURIOUS_INT;
        }

        break;
      case GICC_RPR:
        return iccrpr[0];
      case GICC_HPPIR:
        panic("Need to implement GICC_HPPIR");
        break;
      default:
        panic("Tried to read Gic cpu at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::writeDistributor(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - distRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    const size_t data_sz = pkt->getSize();

    uint32_t pkt_data M5_VAR_USED;
    switch (data_sz)
    {
      case 1:
        pkt_data = pkt->getLE<uint8_t>();
        break;
      case 2:
        pkt_data = pkt->getLE<uint16_t>();
        break;
      case 4:
        pkt_data = pkt->getLE<uint32_t>();
        break;
      default:
        panic("Invalid size when writing to priority regs in Gic: %d\n",
              data_sz);
    }

    DPRINTF(GIC, "gic distributor write register %#x size %#x value %#x \n",
            daddr, data_sz, pkt_data);

    writeDistributor(ctx, daddr, pkt_data, data_sz);

    pkt->makeAtomicResponse();
    return distPioDelay;
}

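// Backend for distributor writes. Set/clear register pairs (enable,
// pending, active) or-in and mask out bits respectively; the priority and
// target registers honour the byte/word access size given in data_sz.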
void
GicV2::writeDistributor(ContextID ctx, Addr daddr, uint32_t data,
                        size_t data_sz)
{
    if (GICD_IGROUPR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_IGROUPR.start()) >> 2;
        assert(ix < 32);
        getIntGroup(ctx, ix) |= data;
        return;
    }

    if (GICD_ISENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
        assert(ix < 32);
        getIntEnabled(ctx, ix) |= data;
        return;
    }

    if (GICD_ICENABLER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
        assert(ix < 32);
        getIntEnabled(ctx, ix) &= ~data;
        return;
    }

    if (GICD_ISPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
        auto mask = data;
        if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
        getPendingInt(ctx, ix) |= mask;
        updateIntState(ix);
        return;
    }

    if (GICD_ICPENDR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
        auto mask = data;
        if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
        getPendingInt(ctx, ix) &= ~mask;
        updateIntState(ix);
        return;
    }

    if (GICD_ISACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
        getActiveInt(ctx, ix) |= data;
        return;
    }

    if (GICD_ICACTIVER.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
        getActiveInt(ctx, ix) &= ~data;
        return;
    }

    if (GICD_IPRIORITYR.contains(daddr)) {
        Addr int_num = daddr - GICD_IPRIORITYR.start();
        switch(data_sz) {
          case 1:
            getIntPriority(ctx, int_num) = data;
            break;
          case 2: {
            getIntPriority(ctx, int_num) = bits(data, 7, 0);
            getIntPriority(ctx, int_num + 1) = bits(data, 15, 8);
            break;
          }
          case 4: {
            getIntPriority(ctx, int_num) = bits(data, 7, 0);
            getIntPriority(ctx, int_num + 1) = bits(data, 15, 8);
            getIntPriority(ctx, int_num + 2) = bits(data, 23, 16);
            getIntPriority(ctx, int_num + 3) = bits(data, 31, 24);
            break;
          }
          default:
            panic("Invalid size when writing to priority regs in Gic: %d\n",
                  data_sz);
        }

        updateIntState(-1);
        updateRunPri();
        return;
    }

    if (GICD_ITARGETSR.contains(daddr)) {
        Addr int_num = daddr - GICD_ITARGETSR.start();
        // Interrupts 0-31 are read only
        unsigned offset = SGI_MAX + PPI_MAX;
        if (int_num >= offset) {
            unsigned ix = int_num - offset; // index into cpuTarget array
            if (data_sz == 1) {
                cpuTarget[ix] = data & 0xff;
            } else {
                assert(data_sz == 4);
                cpuTarget[ix]   = bits(data, 7, 0);
                cpuTarget[ix+1] = bits(data, 15, 8);
                cpuTarget[ix+2] = bits(data, 23, 16);
                cpuTarget[ix+3] = bits(data, 31, 24);
            }
            updateIntState(int_num >> 2);
        }
        return;
    }

    if (GICD_ICFGR.contains(daddr)) {
        uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2;
        assert(ix < INT_BITS_MAX*2);
        intConfig[ix] = data;
        if (data & NN_CONFIG_MASK)
            warn("GIC N:N mode selected and not supported at this time\n");
        return;
    }

    switch(daddr) {
      case GICD_CTLR:
        enabled = data;
        DPRINTF(Interrupt, "Distributor enable flag set to = %d\n", enabled);
        break;
      case GICD_TYPER:
        /* 0x200 is a made-up flag to enable gem5 extension functionality.
         * This reg is not normally written.
         */
        gem5ExtensionsEnabled = (data & 0x200) && haveGem5Extensions;
        DPRINTF(GIC, "gem5 extensions %s\n",
                gem5ExtensionsEnabled ? "enabled" : "disabled");
        break;
      case GICD_SGIR:
        softInt(ctx, data);
        break;
      default:
        panic("Tried to write Gic distributor at offset %#x\n", daddr);
        break;
    }
}

Tick
GicV2::writeCpu(PacketPtr pkt)
{
    const Addr daddr = pkt->getAddr() - cpuRange.start();

    assert(pkt->req->hasContextId());
    const ContextID ctx = pkt->req->contextId();
    const uint32_t data = pkt->getLE<uint32_t>();

    DPRINTF(GIC, "gic cpu write register cpu:%d %#x val: %#x\n",
            ctx, daddr, data);

    writeCpu(ctx, daddr, data);

    pkt->makeAtomicResponse();
    return cpuPioDelay;
}

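// Backend for CPU interface writes. A write to GICC_EOIR deactivates the
// interrupt identified by the IAR value and recomputes the running
// priority.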
void
GicV2::writeCpu(ContextID ctx, Addr daddr, uint32_t data)
{
    switch(daddr) {
      case GICC_CTLR:
        cpuControl[ctx] = data;
        break;
      case GICC_PMR:
        cpuPriority[ctx] = data;
        break;
      case GICC_BPR: {
        auto bpr = data & 0x7;
        if (bpr < GICC_BPR_MINIMUM)
            bpr = GICC_BPR_MINIMUM;
        cpuBpr[ctx] = bpr;
        break;
      }
      case GICC_EOIR: {
        const IAR iar = data;
        if (iar.ack_id < SGI_MAX) {
            // Clear out the bit that corresponds to the cleared int
            uint64_t clr_int = ULL(1) << (ctx + 8 * iar.cpu_id);
            if (!(cpuSgiActive[iar.ack_id] & clr_int) &&
                !(cpuSgiActiveExt[ctx] & (1 << iar.ack_id)))
                panic("Done handling an SGI that isn't active?\n");
            if (gem5ExtensionsEnabled)
                cpuSgiActiveExt[ctx] &= ~(1 << iar.ack_id);
            else
                cpuSgiActive[iar.ack_id] &= ~clr_int;
        } else if (iar.ack_id < (SGI_MAX + PPI_MAX)) {
            uint32_t int_num = 1 << (iar.ack_id - SGI_MAX);
            if (!(cpuPpiActive[ctx] & int_num))
                warn("CPU %d done handling a PPI interrupt "
                     "that isn't active?\n", ctx);
            cpuPpiActive[ctx] &= ~int_num;
        } else {
            uint32_t int_num = 1 << intNumToBit(iar.ack_id);
            if (!(getActiveInt(ctx, intNumToWord(iar.ack_id)) & int_num))
                warn("Done handling interrupt that isn't active: %d\n",
                     intNumToBit(iar.ack_id));
            getActiveInt(ctx, intNumToWord(iar.ack_id)) &= ~int_num;
        }
        updateRunPri();
        DPRINTF(Interrupt, "CPU %d done handling intr IAR = %d from cpu %d\n",
                ctx, iar.ack_id, iar.cpu_id);
        break;
      }
      case GICC_APR0:
      case GICC_APR1:
      case GICC_APR2:
      case GICC_APR3:
        warn("GIC APRn write ignored because not implemented: %#x\n", daddr);
        break;
      case GICC_DIR:
        warn("GIC DIR write ignored because not implemented: %#x\n", daddr);
        break;
      default:
        panic("Tried to write Gic cpu at offset %#x\n", daddr);
        break;
    }
    if (cpuEnabled(ctx)) updateIntState(-1);
}

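// Lazily allocate the per-context bank holding SGI/PPI state (enable,
// pending, active, group and priority registers).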
GicV2::BankedRegs&
GicV2::getBankedRegs(ContextID ctx) {
    if (bankedRegs.size() <= ctx)
        bankedRegs.resize(ctx + 1);

    if (!bankedRegs[ctx])
        bankedRegs[ctx] = new BankedRegs;
    return *bankedRegs[ctx];
}

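// Handle a write to GICD_SGIR: mark the requested SGI pending for the
// targets selected by the list type (explicit CPU list, all CPUs, or only
// the requesting CPU) and re-evaluate the interrupt state.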
void
GicV2::softInt(ContextID ctx, SWI swi)
{
    if (gem5ExtensionsEnabled) {
        switch (swi.list_type) {
          case 0: {
            // interrupt cpus specified
            int dest = swi.cpu_list;
            DPRINTF(IPI, "Generating softIRQ from CPU %d for CPU %d\n",
                    ctx, dest);
            if (cpuEnabled(dest)) {
                cpuSgiPendingExt[dest] |= (1 << swi.sgi_id);
                DPRINTF(IPI, "SGI[%d]=%#x\n", dest,
                        cpuSgiPendingExt[dest]);
            }
          } break;
          case 1: {
            // interrupt all
            for (int i = 0; i < sys->numContexts(); i++) {
                DPRINTF(IPI, "Processing CPU %d\n", i);
                if (!cpuEnabled(i))
                    continue;
                cpuSgiPendingExt[i] |= 1 << swi.sgi_id;
                DPRINTF(IPI, "SGI[%d]=%#x\n", swi.sgi_id,
                        cpuSgiPendingExt[i]);
            }
          } break;
          case 2: {
            // Interrupt requesting cpu only
            DPRINTF(IPI, "Generating softIRQ from CPU %d for CPU %d\n",
                    ctx, ctx);
            if (cpuEnabled(ctx)) {
                cpuSgiPendingExt[ctx] |= (1 << swi.sgi_id);
                DPRINTF(IPI, "SGI[%d]=%#x\n", ctx,
                        cpuSgiPendingExt[ctx]);
            }
          } break;
        }
    } else {
        switch (swi.list_type) {
          case 1:
            // interrupt all
            uint8_t cpu_list;
            cpu_list = 0;
            for (int x = 0; x < sys->numContexts(); x++)
                cpu_list |= cpuEnabled(x) ? 1 << x : 0;
            swi.cpu_list = cpu_list;
            break;
          case 2:
            // interrupt requesting cpu only
            swi.cpu_list = 1 << ctx;
            break;
            // else interrupt cpus specified
        }

        DPRINTF(IPI, "Generating softIRQ from CPU %d for %#x\n", ctx,
                swi.cpu_list);
        for (int i = 0; i < sys->numContexts(); i++) {
            DPRINTF(IPI, "Processing CPU %d\n", i);
            if (!cpuEnabled(i))
                continue;
            if (swi.cpu_list & (1 << i))
                cpuSgiPending[swi.sgi_id] |= (1 << i) << (8 * ctx);
            DPRINTF(IPI, "SGI[%d]=%#x\n", swi.sgi_id,
                    cpuSgiPending[swi.sgi_id]);
        }
    }
    updateIntState(-1);
}

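// Each cpuSgiPending/cpuSgiActive entry packs one byte per source CPU,
// with one bit per target CPU inside that byte. This mask selects the
// given target CPU's bit in every source byte.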
uint64_t
GicV2::genSwiMask(int cpu)
{
    if (cpu > sys->numContexts())
        panic("Invalid CPU ID\n");
    return ULL(0x0101010101010101) << cpu;
}

uint8_t
GicV2::getCpuPriority(unsigned cpu)
{
    // see Table 3-2 in IHI0048B.b (GICv2)
    // mask some low-order priority bits per BPR value
    // NB: the GIC prioritization scheme is upside down:
    // lower values are higher priority; masking off bits
    // actually creates a higher priority, not lower.
    return cpuPriority[cpu] & (0xff00 >> (7 - cpuBpr[cpu]));
}

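// Recompute the highest-priority pending interrupt for every enabled CPU
// and, if it is not already active and its priority beats the masked
// GICC_PMR value, schedule an IRQ or FIQ to be posted after intLatency
// ticks. The hint argument is not yet used to narrow the search (see the
// @todo below).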
void
GicV2::updateIntState(int hint)
{
    for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
        if (!cpuEnabled(cpu))
            continue;

        /*@todo use hint to do less work. */
        int highest_int = SPURIOUS_INT;
        // Priorities below that set in GICC_PMR can be ignored
        uint8_t highest_pri = getCpuPriority(cpu);

        // Check SGIs
        for (int swi = 0; swi < SGI_MAX; swi++) {
            if (!cpuSgiPending[swi] && !cpuSgiPendingExt[cpu])
                continue;
            if ((cpuSgiPending[swi] & genSwiMask(cpu)) ||
                (cpuSgiPendingExt[cpu] & (1 << swi)))
                if (highest_pri > getIntPriority(cpu, swi)) {
                    highest_pri = getIntPriority(cpu, swi);
                    highest_int = swi;
                }
        }

        // Check PPIs
        if (cpuPpiPending[cpu]) {
            for (int ppi_idx = 0, int_num = SGI_MAX;
                 int_num < PPI_MAX + SGI_MAX;
                 ppi_idx++, int_num++) {

                const bool ppi_pending = bits(cpuPpiPending[cpu], ppi_idx);
                const bool ppi_enabled = bits(getIntEnabled(cpu, 0), int_num);
                const bool higher_priority =
                    highest_pri > getIntPriority(cpu, int_num);

                if (ppi_pending && ppi_enabled && higher_priority) {
                    highest_pri = getIntPriority(cpu, int_num);
                    highest_int = int_num;
                }
            }
        }

        bool mp_sys = sys->numRunningContexts() > 1;
        // Check other ints
        for (int x = 0; x < (itLines/INT_BITS_MAX); x++) {
            if (getIntEnabled(cpu, x) & getPendingInt(cpu, x)) {
                for (int y = 0; y < INT_BITS_MAX; y++) {
                    uint32_t int_nm = x * INT_BITS_MAX + y;
                    DPRINTF(GIC, "Checking for interrupt# %d \n", int_nm);
                    /* Make this the highest interrupt for the current cpu
                       if it is pending and enabled, its priority is higher
                       than the current highest, and the current cpu is the
                       target (for MP configurations only). */
                    if ((bits(getIntEnabled(cpu, x), y)
                         & bits(getPendingInt(cpu, x), y)) &&
                        (getIntPriority(cpu, int_nm) < highest_pri))
                        if ((!mp_sys) ||
                            (gem5ExtensionsEnabled
                             ? (getCpuTarget(cpu, int_nm) == cpu)
                             : (getCpuTarget(cpu, int_nm) & (1 << cpu)))) {
                            highest_pri = getIntPriority(cpu, int_nm);
                            highest_int = int_nm;
                        }
                }
            }
        }

        uint32_t prev_highest = cpuHighestInt[cpu];
        cpuHighestInt[cpu] = highest_int;

        if (highest_int == SPURIOUS_INT) {
            if (isLevelSensitive(cpu, prev_highest)) {

                DPRINTF(Interrupt, "Clear IRQ for cpu%d\n", cpu);
                clearInt(cpu, prev_highest);
            }
            continue;
        }

        /* @todo make this work for more than one cpu, need to handle 1:N,
         * N:N models */
        if (enabled && cpuEnabled(cpu) &&
            (highest_pri < getCpuPriority(cpu)) &&
            !(getActiveInt(cpu, intNumToWord(highest_int))
              & (1 << intNumToBit(highest_int)))) {

            DPRINTF(Interrupt, "Posting interrupt %d to cpu%d\n", highest_int,
                    cpu);

            if (isFiq(cpu, highest_int)) {
                postFiq(cpu, curTick() + intLatency);
            } else {
                postInt(cpu, curTick() + intLatency);
            }
        }
    }
}

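// Recompute the running priority (GICC_RPR) of each enabled CPU as the
// highest priority (lowest value) among its currently active interrupts.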
void
GicV2::updateRunPri()
{
    for (int cpu = 0; cpu < sys->numContexts(); cpu++) {
        if (!cpuEnabled(cpu))
            continue;
        uint8_t maxPriority = 0xff;
        for (int i = 0; i < itLines; i++) {
            if (i < SGI_MAX) {
                if (((cpuSgiActive[i] & genSwiMask(cpu)) ||
                     (cpuSgiActiveExt[cpu] & (1 << i))) &&
                    (getIntPriority(cpu, i) < maxPriority))
                    maxPriority = getIntPriority(cpu, i);
            } else if (i < (SGI_MAX + PPI_MAX)) {
                if ((cpuPpiActive[cpu] & (1 << (i - SGI_MAX))) &&
                    (getIntPriority(cpu, i) < maxPriority))
                    maxPriority = getIntPriority(cpu, i);

            } else {
                if (getActiveInt(cpu, intNumToWord(i))
                    & (1 << intNumToBit(i)))
                    if (getIntPriority(cpu, i) < maxPriority)
                        maxPriority = getIntPriority(cpu, i);
            }
        }
        iccrpr[cpu] = maxPriority;
    }
}

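// Raise an SPI (shared peripheral interrupt) from a device. The target CPU
// comes from GICD_ITARGETSR; only a single target is supported unless the
// gem5 extensions are enabled.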
void
GicV2::sendInt(uint32_t num)
{
    uint8_t target = getCpuTarget(0, num);
    DPRINTF(Interrupt, "Received Interrupt number %d, cpuTarget %#x:\n",
            num, target);
    if ((target & (target - 1)) && !gem5ExtensionsEnabled)
        panic("Multiple targets for peripheral interrupts are not supported\n");
    panic_if(num < SGI_MAX + PPI_MAX,
             "sendInt() must only be used for interrupts 32 and higher");
    getPendingInt(target, intNumToWord(num)) |= 1 << intNumToBit(num);
    updateIntState(intNumToWord(num));
}

void
GicV2::sendPPInt(uint32_t num, uint32_t cpu)
{
    DPRINTF(Interrupt, "Received PPI %d, cpuTarget %#x: \n",
            num, cpu);
    cpuPpiPending[cpu] |= 1 << (num - SGI_MAX);
    updateIntState(intNumToWord(num));
}

void
GicV2::clearInt(uint32_t num)
{
    if (isLevelSensitive(0, num)) {
        uint8_t target = getCpuTarget(0, num);

        DPRINTF(Interrupt,
                "Received Clear interrupt number %d, cpuTarget %#x:\n",
                num, target);

        getPendingInt(target, intNumToWord(num)) &= ~(1 << intNumToBit(num));
        updateIntState(intNumToWord(num));
    } else {
        /* Nothing to do:
         * Edge-triggered interrupts remain pending until software
         * writes GICD_ICPENDR or reads GICC_IAR */
    }
}

void
GicV2::clearPPInt(uint32_t num, uint32_t cpu)
{
    DPRINTF(Interrupt, "Clearing PPI %d, cpuTarget %#x: \n",
            num, cpu);
    cpuPpiPending[cpu] &= ~(1 << (num - SGI_MAX));
    updateIntState(intNumToWord(num));
}

void
GicV2::clearInt(ContextID ctx, uint32_t int_num)
{
    if (isFiq(ctx, int_num)) {
        platform->intrctrl->clear(ctx, ArmISA::INT_FIQ, 0);
    } else {
        platform->intrctrl->clear(ctx, ArmISA::INT_IRQ, 0);
    }
}

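// Interrupt delivery to the CPU is delayed by intLatency and modelled with
// per-CPU events; pendingDelayedInterrupts tracks in-flight posts so that
// draining can wait for them to complete.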
void
GicV2::postInt(uint32_t cpu, Tick when)
{
    if (!(postIntEvent[cpu]->scheduled())) {
        ++pendingDelayedInterrupts;
        eventq->schedule(postIntEvent[cpu], when);
    }
}

void
GicV2::postDelayedInt(uint32_t cpu)
{
    platform->intrctrl->post(cpu, ArmISA::INT_IRQ, 0);
    --pendingDelayedInterrupts;
    assert(pendingDelayedInterrupts >= 0);
    if (pendingDelayedInterrupts == 0)
        signalDrainDone();
}

void
GicV2::postFiq(uint32_t cpu, Tick when)
{
    if (!(postFiqEvent[cpu]->scheduled())) {
        ++pendingDelayedInterrupts;
        eventq->schedule(postFiqEvent[cpu], when);
    }
}

void
GicV2::postDelayedFiq(uint32_t cpu)
{
    platform->intrctrl->post(cpu, ArmISA::INT_FIQ, 0);
    --pendingDelayedInterrupts;
    assert(pendingDelayedInterrupts >= 0);
    if (pendingDelayedInterrupts == 0)
        signalDrainDone();
}

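// The GIC is drained only once no delayed IRQ/FIQ post events remain
// outstanding.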
DrainState
GicV2::drain()
{
    if (pendingDelayedInterrupts == 0) {
        return DrainState::Drained;
    } else {
        return DrainState::Draining;
    }
}


void
GicV2::drainResume()
{
    // There may be pending interrupts if checkpointed from Kvm; post them.
    updateIntState(-1);
}

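// Checkpointing: serialize the global distributor and CPU interface state,
// then each per-context bank of SGI/PPI registers in its own section.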
void
GicV2::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm GIC\n");

    SERIALIZE_SCALAR(enabled);
    SERIALIZE_SCALAR(itLines);
    SERIALIZE_ARRAY(intEnabled, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(pendingInt, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(activeInt, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(intGroup, INT_BITS_MAX-1);
    SERIALIZE_ARRAY(iccrpr, CPU_MAX);
    SERIALIZE_ARRAY(intPriority, GLOBAL_INT_LINES);
    SERIALIZE_ARRAY(cpuTarget, GLOBAL_INT_LINES);
    SERIALIZE_ARRAY(intConfig, INT_BITS_MAX * 2);
    SERIALIZE_ARRAY(cpuControl, CPU_MAX);
    SERIALIZE_ARRAY(cpuPriority, CPU_MAX);
    SERIALIZE_ARRAY(cpuBpr, CPU_MAX);
    SERIALIZE_ARRAY(cpuHighestInt, CPU_MAX);
    SERIALIZE_ARRAY(cpuSgiActive, SGI_MAX);
    SERIALIZE_ARRAY(cpuSgiPending, SGI_MAX);
    SERIALIZE_ARRAY(cpuSgiActiveExt, CPU_MAX);
    SERIALIZE_ARRAY(cpuSgiPendingExt, CPU_MAX);
    SERIALIZE_ARRAY(cpuPpiActive, CPU_MAX);
    SERIALIZE_ARRAY(cpuPpiPending, CPU_MAX);
    SERIALIZE_SCALAR(gem5ExtensionsEnabled);

    for (uint32_t i=0; i < bankedRegs.size(); ++i) {
        if (!bankedRegs[i])
            continue;
        bankedRegs[i]->serializeSection(cp, csprintf("bankedRegs%i", i));
    }
}

void
GicV2::BankedRegs::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(intEnabled);
    SERIALIZE_SCALAR(pendingInt);
    SERIALIZE_SCALAR(activeInt);
    SERIALIZE_SCALAR(intGroup);
    SERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
}

void
GicV2::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm GIC\n");

    UNSERIALIZE_SCALAR(enabled);
    UNSERIALIZE_SCALAR(itLines);
    UNSERIALIZE_ARRAY(intEnabled, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(pendingInt, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(activeInt, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(intGroup, INT_BITS_MAX-1);
    UNSERIALIZE_ARRAY(iccrpr, CPU_MAX);
    UNSERIALIZE_ARRAY(intPriority, GLOBAL_INT_LINES);
    UNSERIALIZE_ARRAY(cpuTarget, GLOBAL_INT_LINES);
    UNSERIALIZE_ARRAY(intConfig, INT_BITS_MAX * 2);
    UNSERIALIZE_ARRAY(cpuControl, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPriority, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuBpr, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuHighestInt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuSgiActive, SGI_MAX);
    UNSERIALIZE_ARRAY(cpuSgiPending, SGI_MAX);
    UNSERIALIZE_ARRAY(cpuSgiActiveExt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuSgiPendingExt, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPpiActive, CPU_MAX);
    UNSERIALIZE_ARRAY(cpuPpiPending, CPU_MAX);

    // Handle checkpoints from before we drained the GIC to prevent
    // in-flight interrupts.
    if (cp.entryExists(Serializable::currentSection(), "interrupt_time")) {
        Tick interrupt_time[CPU_MAX];
        UNSERIALIZE_ARRAY(interrupt_time, CPU_MAX);

        for (uint32_t cpu = 0; cpu < CPU_MAX; cpu++) {
            if (interrupt_time[cpu])
                schedule(postIntEvent[cpu], interrupt_time[cpu]);
        }
    }

    if (!UNSERIALIZE_OPT_SCALAR(gem5ExtensionsEnabled))
        gem5ExtensionsEnabled = false;

    for (uint32_t i=0; i < CPU_MAX; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("bankedRegs%i", i));
        if (cp.sectionExists(Serializable::currentSection())) {
            getBankedRegs(i).unserialize(cp);
        }
    }
}

void
GicV2::BankedRegs::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(intEnabled);
    UNSERIALIZE_SCALAR(pendingInt);
    UNSERIALIZE_SCALAR(activeInt);
    UNSERIALIZE_SCALAR(intGroup);
    UNSERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
}

GicV2 *
GicV2Params::create()
{
    return new GicV2(this);
}