1/*
2 * Copyright (c) 2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Matt Evans
38 */
39
40#include "dev/arm/vgic.hh"
41
42#include "base/trace.hh"
43#include "debug/Checkpoint.hh"
44#include "debug/VGIC.hh"
45#include "dev/arm/base_gic.hh"
44#include "dev/arm/vgic.hh"
46#include "dev/terminal.hh"
47#include "mem/packet.hh"
48#include "mem/packet_access.hh"
49
50VGic::VGic(const Params *p)
51 : PioDevice(p), platform(p->platform), gic(p->gic), vcpuAddr(p->vcpu_addr),
52 hvAddr(p->hv_addr), pioDelay(p->pio_delay),
53 maintInt(p->ppint)
54{
55 for (int x = 0; x < VGIC_CPU_MAX; x++) {
56 postVIntEvent[x] = new PostVIntEvent(x, p->platform);
57 maintIntPosted[x] = false;
58 vIntPosted[x] = false;
59 }
60 assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
61}
62
63Tick
64VGic::read(PacketPtr pkt)
65{
66 Addr addr = pkt->getAddr();
67
68 if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
69 return readVCpu(pkt);
70 else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
71 return readCtrl(pkt);
72 else
73 panic("Read to unknown address %#x\n", pkt->getAddr());
74}
75
76Tick
77VGic::write(PacketPtr pkt)
78{
79 Addr addr = pkt->getAddr();
80
81 if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
82 return writeVCpu(pkt);
83 else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
84 return writeCtrl(pkt);
85 else
86 panic("Write to unknown address %#x\n", pkt->getAddr());
87}
88
Tick
VGic::readVCpu(PacketPtr pkt)
{
    // Handle a load from the guest-visible virtual CPU interface (GICV_*).
    // The register file is banked by the requesting context's ID.
    Addr daddr = pkt->getAddr() - vcpuAddr;

    ContextID ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU read register %#x\n", daddr);

    switch (daddr) {
      case GICV_CTLR:
        pkt->set<uint32_t>(vid->vctrl);
        break;
      case GICV_IAR: {
          // Interrupt-acknowledge: hand the guest the highest-priority
          // pending list register, if any.
          int i = findHighestPendingLR(vid);
          if (i < 0 || !vid->vctrl.En) {
              // Nothing pending (or interface disabled): spurious ID.
              pkt->set<uint32_t>(1023); // "No int" marker
          } else {
              ListReg *lr = &vid->LR[i];

              // IAR value is the virtual ID plus the source CPU in [12:10].
              pkt->set<uint32_t>(lr->VirtualID |
                                 (((int)lr->CpuID) << 10));
              // We don't support auto-EOI of HW interrupts via real GIC!
              // Fortunately, KVM doesn't use this. How about Xen...? Ulp!
              if (lr->HW)
                  panic("VGIC does not support 'HW' List Register feature (LR %#x)!\n",
                        *lr);
              // Acknowledging moves the LR from pending to active.
              lr->State = LR_ACTIVE;
              DPRINTF(VGIC, "Consumed interrupt %d (cpu%d) from LR%d (EOI%d)\n",
                      lr->VirtualID, lr->CpuID, i, lr->EOI);
          }
      } break;
      default:
        panic("VGIC VCPU read of bad address %#x\n", daddr);
    }

    // Acknowledge may have changed pending state; recompute EISR and lines.
    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}
132
Tick
VGic::readCtrl(PacketPtr pkt)
{
    // Handle a load from the hypervisor control interface (GICH_*).
    Addr daddr = pkt->getAddr() - hvAddr;

    ContextID ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr);

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
    if (daddr & ~0x1ff) {
        // Unbanked alias: bits [.. :9] select which CPU's bank to touch.
        ctx_id = (daddr >> 9);
        // NOTE(review): this bound admits ctx_id == 8, which the assert
        // below rejects when VGIC_CPU_MAX == 8 -- confirm intended limit.
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        pkt->set<uint32_t>(vid->hcr);
        break;

      case GICH_VTR:
        // Type register: number of list registers minus one in the low
        // bits; 0x44000000 presumably encodes the PRIbits/PREbits
        // capability fields -- TODO confirm against the GICv2 spec.
        pkt->set<uint32_t>(0x44000000 | (NUM_LR - 1));
        break;

      case GICH_VMCR:
        // Reassemble the virtual machine control register from the
        // individual cached fields (mirrors the writeCtrl decode).
        pkt->set<uint32_t>(
            ((uint32_t)vid->VMPriMask << 27) |
            ((uint32_t)vid->VMBP << 21) |
            ((uint32_t)vid->VMABP << 18) |
            ((uint32_t)vid->VEM << 9) |
            ((uint32_t)vid->VMCBPR << 4) |
            ((uint32_t)vid->VMFiqEn << 3) |
            ((uint32_t)vid->VMAckCtl << 2) |
            ((uint32_t)vid->VMGrp1En << 1) |
            ((uint32_t)vid->VMGrp0En << 0)
            );
        break;

      case GICH_MISR:
        // Maintenance interrupt status is computed on demand.
        pkt->set<uint32_t>(getMISR(vid));
        break;

      case GICH_EISR0:
        // Low word of cached EOI status (rebuilt in updateIntState()).
        pkt->set<uint32_t>(vid->eisr & 0xffffffff);
        break;

      case GICH_EISR1:
        pkt->set<uint32_t>(vid->eisr >> 32);
        break;

      case GICH_ELSR0: {
          // Empty-list-register status: bit set for each LR with no state.
          uint32_t bm = 0;
          for (int i = 0; i < ((NUM_LR < 32) ? NUM_LR : 32); i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << i;
          }
          pkt->set<uint32_t>(bm);
      } break;

      case GICH_ELSR1: {
          // Same for LRs 32 and up (no-op loop unless NUM_LR > 32).
          uint32_t bm = 0;
          for (int i = 32; i < NUM_LR; i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << (i-32);
          }
          pkt->set<uint32_t>(bm);
      } break;

      case GICH_APR0:
        // Active priorities are not modelled; always read as zero.
        warn_once("VGIC GICH_APR read!\n");
        pkt->set<uint32_t>(0);
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        // List registers are word-indexed from GICH_LR0.
        pkt->set<uint32_t>(vid->LR[(daddr - GICH_LR0) >> 2]);
        break;

      default:
        panic("VGIC HVCtrl read of bad address %#x\n", daddr);
    }

    pkt->makeAtomicResponse();
    return pioDelay;
}
226
Tick
VGic::writeVCpu(PacketPtr pkt)
{
    // Handle a store to the guest-visible virtual CPU interface (GICV_*),
    // banked by the requesting context's ID.
    Addr daddr = pkt->getAddr() - vcpuAddr;

    ContextID ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU write register %#x <= %#x\n", daddr, pkt->get<uint32_t>());

    switch (daddr) {
      case GICV_CTLR:
        vid->vctrl = pkt->get<uint32_t>();
        break;
      case GICV_PMR:
        vid->VMPriMask = pkt->get<uint32_t>();
        break;
      case GICV_EOIR: {
          // We don't handle the split EOI-then-DIR mode. Linux (guest)
          // doesn't need it though.
          assert(!vid->vctrl.EOImode);
          // EOI value mirrors the IAR encoding: vIRQ in [9:0], CPU in
          // [12:10].
          uint32_t w = pkt->get<uint32_t>();
          unsigned int virq = w & 0x3ff;
          unsigned int vcpu = (w >> 10) & 7;
          int i = findLRForVIRQ(vid, virq, vcpu);
          if (i < 0) {
              DPRINTF(VGIC, "EOIR: No LR for irq %d(cpu%d)\n", virq, vcpu);
          } else {
              DPRINTF(VGIC, "EOIR: Found LR%d for irq %d(cpu%d)\n", i, virq, vcpu);
              ListReg *lr = &vid->LR[i];
              // Retire the interrupt: LR becomes invalid/empty.
              lr->State = 0;
              // Maintenance interrupt -- via eisr -- is flagged when
              // LRs have EOI=1 and State=INVALID!
          }
      } break;
      default:
        panic("VGIC VCPU write %#x to unk address %#x\n", pkt->get<uint32_t>(), daddr);
    }

    // This updates the EISRs and flags IRQs:
    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}
273
Tick
VGic::writeCtrl(PacketPtr pkt)
{
    // Handle a store to the hypervisor control interface (GICH_*).
    Addr daddr = pkt->getAddr() - hvAddr;

    ContextID ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n", daddr, pkt->get<uint32_t>());

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
    if (daddr & ~0x1ff) {
        // Unbanked alias: bits above bit 8 select the target CPU's bank.
        ctx_id = (daddr >> 9);
        // NOTE(review): admits ctx_id == 8, which the assert below rejects
        // when VGIC_CPU_MAX == 8 -- confirm intended limit.
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        vid->hcr = pkt->get<uint32_t>();
        // Interrupt state is refreshed by updateIntState() below.
        break;

      case GICH_VMCR: {
          // Split the VM control register into cached fields (mirrors the
          // readCtrl reassembly).
          uint32_t d = pkt->get<uint32_t>();
          vid->VMPriMask = d >> 27;
          vid->VMBP = (d >> 21) & 7;
          vid->VMABP = (d >> 18) & 7;
          vid->VEM = (d >> 9) & 1;
          vid->VMCBPR = (d >> 4) & 1;
          vid->VMFiqEn = (d >> 3) & 1;
          vid->VMAckCtl = (d >> 2) & 1;
          vid->VMGrp1En = (d >> 1) & 1;
          vid->VMGrp0En = d & 1;
      } break;

      case GICH_APR0:
        // Active priorities are not modelled; writes are discarded.
        warn_once("VGIC GICH_APR0 written, ignored\n");
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        // List registers are word-indexed from GICH_LR0.
        vid->LR[(daddr - GICH_LR0) >> 2] = pkt->get<uint32_t>();
        // Interrupt state is refreshed by updateIntState() below.
        break;

      default:
        panic("VGIC HVCtrl write to bad address %#x\n", daddr);
    }

    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}
335
336
337uint32_t
338VGic::getMISR(struct vcpuIntData *vid)
339{
340 return (!!vid->hcr.VGrp1DIE && !vid->VMGrp1En ? 0x80 : 0) |
341 (!!vid->hcr.VGrp1EIE && vid->VMGrp1En ? 0x40 : 0) |
342 (!!vid->hcr.VGrp0DIE && !vid->VMGrp0En ? 0x20 : 0) |
343 (!!vid->hcr.VGrp0EIE && vid->VMGrp0En ? 0x10 : 0) |
344 (!!vid->hcr.NPIE && !lrPending(vid) ? 0x08 : 0) |
345 (!!vid->hcr.LRENPIE && vid->hcr.EOICount ? 0x04 : 0) |
346 (!!vid->hcr.UIE && lrValid(vid) <= 1 ? 0x02 : 0) |
347 (vid->eisr ? 0x01 : 0);
348}
349
350void
351VGic::postVInt(uint32_t cpu, Tick when)
352{
353 DPRINTF(VGIC, "Posting VIRQ to %d\n", cpu);
354 if (!(postVIntEvent[cpu]->scheduled()))
355 eventq->schedule(postVIntEvent[cpu], when);
356}
357
void
VGic::unPostVInt(uint32_t cpu)
{
    // Deassert the virtual IRQ line for this context via the platform
    // interrupt controller.
    DPRINTF(VGIC, "Unposting VIRQ to %d\n", cpu);
    platform->intrctrl->clear(cpu, ArmISA::INT_VIRT_IRQ, 0);
}
364
void
VGic::postMaintInt(uint32_t cpu)
{
    // Raise the maintenance PPI for this CPU through the physical GIC.
    DPRINTF(VGIC, "Posting maintenance PPI to GIC/cpu%d\n", cpu);
    // Linux DT configures this as Level.
    gic->sendPPInt(maintInt, cpu);
}
372
void
VGic::unPostMaintInt(uint32_t cpu)
{
    // Drop the (level-triggered) maintenance PPI for this CPU.
    DPRINTF(VGIC, "Unposting maintenance PPI to GIC/cpu%d\n", cpu);
    gic->clearPPInt(maintInt, cpu);
}
379
/* Update state (in general); something concerned with ctx_id has changed.
 * This may raise a maintenance interrupt.
 */
void
VGic::updateIntState(ContextID ctx_id)
{
    // @todo This should update APRs!

    // Build EISR contents:
    // (Cached so that regs can read them without messing about again)
    struct vcpuIntData *tvid = &vcpuData[ctx_id];

    // EISR flags each LR that the guest has EOIed (State == 0) while its
    // EOI maintenance bit is set.
    tvid->eisr = 0;
    for (int i = 0; i < NUM_LR; i++) {
        if (!tvid->LR[i].State && tvid->LR[i].EOI) {
            tvid->eisr |= 1 << i;
        }
    }

    assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
    // Re-evaluate posting state for every running context, not just
    // ctx_id.
    for (int i = 0; i < sys->numRunningContexts(); i++) {
        struct vcpuIntData *vid = &vcpuData[i];
        // Are any LRs active that weren't before?
        if (!vIntPosted[i]) {
            // Something pending and the guest interface enabled: schedule
            // the virtual IRQ one tick from now.
            if (lrPending(vid) && vid->vctrl.En) {
                vIntPosted[i] = true;
                postVInt(i, curTick() + 1);
            }
        } else if (!lrPending(vid)) {
            // Nothing pending any more: drop the virtual IRQ line.
            vIntPosted[i] = false;
            unPostVInt(i);
        }

        // Any maintenance ints to send?
        if (!maintIntPosted[i]) {
            if (vid->hcr.En && getMISR(vid)) {
                maintIntPosted[i] = true;
                postMaintInt(i);
            }
        } else {
            // Level-sensitive: clear once the cause disappears or the
            // hypervisor interface is disabled.
            if (!vid->hcr.En || !getMISR(vid)) {
                unPostMaintInt(i);
                maintIntPosted[i] = false;
            }
        }
    }
}
427
428AddrRangeList
429VGic::getAddrRanges() const
430{
431 AddrRangeList ranges;
432 ranges.push_back(RangeSize(hvAddr, GICH_REG_SIZE));
433 ranges.push_back(RangeSize(vcpuAddr, GICV_SIZE));
434 return ranges;
435}
436
void
VGic::serialize(CheckpointOut &cp) const
{
    // Record the scheduled tick of each pending post-virtual-interrupt
    // event (0 when idle) so unserialize() can reschedule them.
    Tick interrupt_time[VGIC_CPU_MAX];
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        interrupt_time[cpu] = 0;
        if (postVIntEvent[cpu]->scheduled()) {
            interrupt_time[cpu] = postVIntEvent[cpu]->when();
        }
    }

    DPRINTF(Checkpoint, "Serializing VGIC\n");

    SERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    SERIALIZE_SCALAR(vcpuAddr);
    SERIALIZE_SCALAR(hvAddr);
    SERIALIZE_SCALAR(pioDelay);
    SERIALIZE_SCALAR(maintInt);

    // Each per-CPU interface gets its own named checkpoint section.
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++)
        vcpuData[cpu].serializeSection(cp, csprintf("vcpuData%d", cpu));
}
461
void
VGic::vcpuIntData::serialize(CheckpointOut &cp) const
{
    // Each bitfield is widened into a fixed-size local before
    // serialization so the checkpoint format is stable. Key names and
    // ordering must stay in sync with vcpuIntData::unserialize().
    uint32_t vctrl_val = vctrl;
    SERIALIZE_SCALAR(vctrl_val);
    uint32_t hcr_val = hcr;
    SERIALIZE_SCALAR(hcr_val);
    uint64_t eisr_val = eisr;
    SERIALIZE_SCALAR(eisr_val);
    uint8_t VMGrp0En_val = VMGrp0En;
    SERIALIZE_SCALAR(VMGrp0En_val);
    uint8_t VMGrp1En_val = VMGrp1En;
    SERIALIZE_SCALAR(VMGrp1En_val);
    uint8_t VMAckCtl_val = VMAckCtl;
    SERIALIZE_SCALAR(VMAckCtl_val);
    uint8_t VMFiqEn_val = VMFiqEn;
    SERIALIZE_SCALAR(VMFiqEn_val);
    uint8_t VMCBPR_val = VMCBPR;
    SERIALIZE_SCALAR(VMCBPR_val);
    uint8_t VEM_val = VEM;
    SERIALIZE_SCALAR(VEM_val);
    uint8_t VMABP_val = VMABP;
    SERIALIZE_SCALAR(VMABP_val);
    uint8_t VMBP_val = VMBP;
    SERIALIZE_SCALAR(VMBP_val);
    uint8_t VMPriMask_val = VMPriMask;
    SERIALIZE_SCALAR(VMPriMask_val);

    // One sub-section per list register.
    for (int i = 0; i < NUM_LR; i++) {
        ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
        paramOut(cp, "lr", LR[i]);
    }
}
495
void VGic::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm GIC\n");

    // Reschedule any virtual-interrupt-post events that were pending when
    // the checkpoint was taken (a stored tick of 0 means none).
    Tick interrupt_time[VGIC_CPU_MAX];
    UNSERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        if (interrupt_time[cpu])
            schedule(postVIntEvent[cpu], interrupt_time[cpu]);

        // Restore the per-CPU interface state from its named section.
        vcpuData[cpu].unserializeSection(cp, csprintf("vcpuData%d", cpu));
    }
    UNSERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_SCALAR(vcpuAddr);
    UNSERIALIZE_SCALAR(hvAddr);
    UNSERIALIZE_SCALAR(pioDelay);
    UNSERIALIZE_SCALAR(maintInt);
}
515
516void
517VGic::vcpuIntData::unserialize(CheckpointIn &cp)
518{
519 paramIn(cp, "vctrl_val", vctrl);
520 paramIn(cp, "hcr_val", hcr);
521 paramIn(cp, "eisr_val", eisr);
522 paramIn(cp, "VMGrp0En_val", VMGrp0En);
523 paramIn(cp, "VMGrp1En_val", VMGrp1En);
524 paramIn(cp, "VMAckCtl_val", VMAckCtl);
525 paramIn(cp, "VMFiqEn_val", VMFiqEn);
526 paramIn(cp, "VMCBPR_val", VMCBPR);
527 paramIn(cp, "VEM_val", VEM);
528 paramIn(cp, "VMABP_val", VMABP);
529 paramIn(cp, "VMPriMask_val", VMPriMask);
530
531 for (int i = 0; i < NUM_LR; i++) {
532 ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
533 paramIn(cp, "lr", LR[i]);
534 }
535}
536
537VGic *
538VGicParams::create()
539{
540 return new VGic(this);
541}