vgic.cc (10565:23593fdaadcd) vs. vgic.cc (10905:a6ca6831e775)
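/*
 * gem5's virtualised ARM GIC (VGIC) device model: the GICV_* registers form
 * the virtual CPU interface presented to the guest, while the GICH_*
 * registers form the hypervisor control interface (list registers,
 * maintenance-interrupt status).  The two revisions named above differ in
 * the constructor and in the checkpoint serialization/unserialization code.
 */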
/*
 * Copyright (c) 2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Matt Evans
 */

#include "base/trace.hh"
#include "debug/Checkpoint.hh"
#include "debug/VGIC.hh"
#include "dev/arm/base_gic.hh"
#include "dev/arm/vgic.hh"
#include "dev/terminal.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"

VGic::VGic(const Params *p)
    : PioDevice(p), platform(p->platform), gic(p->gic), vcpuAddr(p->vcpu_addr),
      hvAddr(p->hv_addr), pioDelay(p->pio_delay),
      maintInt(p->ppint)
{
    for (int x = 0; x < VGIC_CPU_MAX; x++) {
        postVIntEvent[x] = new PostVIntEvent(x, p->platform);
        maintIntPosted[x] = false;
        vIntPosted[x] = false;
    }
    // Present only in revision 10565:23593fdaadcd; revision
    // 10905:a6ca6831e775 drops this explicit zeroing.
    for (int c = 0; c < VGIC_CPU_MAX; c++) {
        memset(&vcpuData[c], 0, sizeof(struct vcpuIntData));
    }

    assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
}

Tick
VGic::read(PacketPtr pkt)
{
    Addr addr = pkt->getAddr();

    if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
        return readVCpu(pkt);
    else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
        return readCtrl(pkt);
    else
        panic("Read to unknown address %#x\n", pkt->getAddr());
}

Tick
VGic::write(PacketPtr pkt)
{
    Addr addr = pkt->getAddr();

    if (addr >= vcpuAddr && addr < vcpuAddr + GICV_SIZE)
        return writeVCpu(pkt);
    else if (addr >= hvAddr && addr < hvAddr + GICH_REG_SIZE)
        return writeCtrl(pkt);
    else
        panic("Write to unknown address %#x\n", pkt->getAddr());
}

Tick
VGic::readVCpu(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - vcpuAddr;

    int ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU read register %#x\n", daddr);

    switch (daddr) {
      case GICV_CTLR:
        pkt->set<uint32_t>(vid->vctrl);
        break;
      case GICV_IAR: {
          int i = findHighestPendingLR(vid);
          if (i < 0 || !vid->vctrl.En) {
              pkt->set<uint32_t>(1023); // "No int" marker
          } else {
              ListReg *lr = &vid->LR[i];

              pkt->set<uint32_t>(lr->VirtualID |
                                 (((int)lr->CpuID) << 10));
              // We don't support auto-EOI of HW interrupts via real GIC!
              // Fortunately, KVM doesn't use this. How about Xen...? Ulp!
              if (lr->HW)
                  panic("VGIC does not support 'HW' List Register feature (LR %#x)!\n",
                        *lr);
              lr->State = LR_ACTIVE;
              DPRINTF(VGIC, "Consumed interrupt %d (cpu%d) from LR%d (EOI%d)\n",
                      lr->VirtualID, lr->CpuID, i, lr->EOI);
          }
      } break;
      default:
        panic("VGIC VCPU read of bad address %#x\n", daddr);
    }

    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::readCtrl(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - hvAddr;

    int ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr);

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
    if (daddr & ~0x1ff) {
        ctx_id = (daddr >> 9);
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        pkt->set<uint32_t>(vid->hcr);
        break;

      case GICH_VTR:
        pkt->set<uint32_t>(0x44000000 | (NUM_LR - 1));
        break;

      case GICH_VMCR:
        pkt->set<uint32_t>(
            ((uint32_t)vid->VMPriMask << 27) |
            ((uint32_t)vid->VMBP << 21) |
            ((uint32_t)vid->VMABP << 18) |
            ((uint32_t)vid->VEM << 9) |
            ((uint32_t)vid->VMCBPR << 4) |
            ((uint32_t)vid->VMFiqEn << 3) |
            ((uint32_t)vid->VMAckCtl << 2) |
            ((uint32_t)vid->VMGrp1En << 1) |
            ((uint32_t)vid->VMGrp0En << 0)
            );
        break;

      case GICH_MISR:
        pkt->set<uint32_t>(getMISR(vid));
        break;

      case GICH_EISR0:
        pkt->set<uint32_t>(vid->eisr & 0xffffffff);
        break;

      case GICH_EISR1:
        pkt->set<uint32_t>(vid->eisr >> 32);
        break;

      case GICH_ELSR0: {
          uint32_t bm = 0;
          for (int i = 0; i < ((NUM_LR < 32) ? NUM_LR : 32); i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << i;
          }
          pkt->set<uint32_t>(bm);
      } break;

      case GICH_ELSR1: {
          uint32_t bm = 0;
          for (int i = 32; i < NUM_LR; i++) {
              if (!vid->LR[i].State)
                  bm |= 1 << (i-32);
          }
          pkt->set<uint32_t>(bm);
      } break;

      case GICH_APR0:
        warn_once("VGIC GICH_APR read!\n");
        pkt->set<uint32_t>(0);
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        pkt->set<uint32_t>(vid->LR[(daddr - GICH_LR0) >> 2]);
        break;

      default:
        panic("VGIC HVCtrl read of bad address %#x\n", daddr);
    }

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::writeVCpu(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - vcpuAddr;

    int ctx_id = pkt->req->contextId();
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    DPRINTF(VGIC, "VGIC VCPU write register %#x <= %#x\n", daddr, pkt->get<uint32_t>());

    switch (daddr) {
      case GICV_CTLR:
        vid->vctrl = pkt->get<uint32_t>();
        break;
      case GICV_PMR:
        vid->VMPriMask = pkt->get<uint32_t>();
        break;
      case GICV_EOIR: {
          // We don't handle the split EOI-then-DIR mode. Linux (guest)
          // doesn't need it though.
          assert(!vid->vctrl.EOImode);
          uint32_t w = pkt->get<uint32_t>();
          unsigned int virq = w & 0x3ff;
          unsigned int vcpu = (w >> 10) & 7;
          int i = findLRForVIRQ(vid, virq, vcpu);
          if (i < 0) {
              DPRINTF(VGIC, "EOIR: No LR for irq %d(cpu%d)\n", virq, vcpu);
          } else {
              DPRINTF(VGIC, "EOIR: Found LR%d for irq %d(cpu%d)\n", i, virq, vcpu);
              ListReg *lr = &vid->LR[i];
              lr->State = 0;
              // Maintenance interrupt -- via eisr -- is flagged when
              // LRs have EOI=1 and State=INVALID!
          }
      } break;
      default:
        panic("VGIC VCPU write %#x to unk address %#x\n", pkt->get<uint32_t>(), daddr);
    }

    // This updates the EISRs and flags IRQs:
    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
VGic::writeCtrl(PacketPtr pkt)
{
    Addr daddr = pkt->getAddr() - hvAddr;

    int ctx_id = pkt->req->contextId();

    DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n", daddr, pkt->get<uint32_t>());

    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
     * Anything > that is 0x200-sized slices of 'per CPU' regs.
     */
    if (daddr & ~0x1ff) {
        ctx_id = (daddr >> 9);
        if (ctx_id > 8)
            panic("VGIC: Weird unbanked hv ctrl address %#x!\n", daddr);
        daddr &= ~0x1ff;
    }
    assert(ctx_id < VGIC_CPU_MAX);
    struct vcpuIntData *vid = &vcpuData[ctx_id];

    switch (daddr) {
      case GICH_HCR:
        vid->hcr = pkt->get<uint32_t>();
        // update int state
        break;

      case GICH_VMCR: {
          uint32_t d = pkt->get<uint32_t>();
          vid->VMPriMask = d >> 27;
          vid->VMBP = (d >> 21) & 7;
          vid->VMABP = (d >> 18) & 7;
          vid->VEM = (d >> 9) & 1;
          vid->VMCBPR = (d >> 4) & 1;
          vid->VMFiqEn = (d >> 3) & 1;
          vid->VMAckCtl = (d >> 2) & 1;
          vid->VMGrp1En = (d >> 1) & 1;
          vid->VMGrp0En = d & 1;
      } break;

      case GICH_APR0:
        warn_once("VGIC GICH_APR0 written, ignored\n");
        break;

      case GICH_LR0:
      case GICH_LR1:
      case GICH_LR2:
      case GICH_LR3:
        vid->LR[(daddr - GICH_LR0) >> 2] = pkt->get<uint32_t>();
        // update int state
        break;

      default:
        panic("VGIC HVCtrl write to bad address %#x\n", daddr);
    }

    updateIntState(ctx_id);

    pkt->makeAtomicResponse();
    return pioDelay;
}


uint32_t
VGic::getMISR(struct vcpuIntData *vid)
{
    return (!!vid->hcr.VGrp1DIE && !vid->VMGrp1En ? 0x80 : 0) |
        (!!vid->hcr.VGrp1EIE && vid->VMGrp1En ? 0x40 : 0) |
        (!!vid->hcr.VGrp0DIE && !vid->VMGrp0En ? 0x20 : 0) |
        (!!vid->hcr.VGrp0EIE && vid->VMGrp0En ? 0x10 : 0) |
        (!!vid->hcr.NPIE && !lrPending(vid) ? 0x08 : 0) |
        (!!vid->hcr.LRENPIE && vid->hcr.EOICount ? 0x04 : 0) |
        (!!vid->hcr.UIE && lrValid(vid) <= 1 ? 0x02 : 0) |
        (vid->eisr ? 0x01 : 0);
}

void
VGic::postVInt(uint32_t cpu, Tick when)
{
    DPRINTF(VGIC, "Posting VIRQ to %d\n", cpu);
    if (!(postVIntEvent[cpu]->scheduled()))
        eventq->schedule(postVIntEvent[cpu], when);
}

void
VGic::unPostVInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Unposting VIRQ to %d\n", cpu);
    platform->intrctrl->clear(cpu, ArmISA::INT_VIRT_IRQ, 0);
}

void
VGic::postMaintInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Posting maintenance PPI to GIC/cpu%d\n", cpu);
    // Linux DT configures this as Level.
    gic->sendPPInt(maintInt, cpu);
}

void
VGic::unPostMaintInt(uint32_t cpu)
{
    DPRINTF(VGIC, "Unposting maintenance PPI to GIC/cpu%d\n", cpu);
    gic->clearPPInt(maintInt, cpu);
}

/* Update state (in general); something concerned with ctx_id has changed.
 * This may raise a maintenance interrupt.
 */
void
VGic::updateIntState(int ctx_id)
{
    // @todo This should update APRs!

    // Build EISR contents:
    // (Cached so that regs can read them without messing about again)
    struct vcpuIntData *tvid = &vcpuData[ctx_id];

    tvid->eisr = 0;
    for (int i = 0; i < NUM_LR; i++) {
        if (!tvid->LR[i].State && tvid->LR[i].EOI) {
            tvid->eisr |= 1 << i;
        }
    }

    assert(sys->numRunningContexts() <= VGIC_CPU_MAX);
    for (int i = 0; i < sys->numRunningContexts(); i++) {
        struct vcpuIntData *vid = &vcpuData[i];
        // Are any LRs active that weren't before?
        if (!vIntPosted[i]) {
            if (lrPending(vid) && vid->vctrl.En) {
                vIntPosted[i] = true;
                postVInt(i, curTick() + 1);
            }
        } else if (!lrPending(vid)) {
            vIntPosted[i] = false;
            unPostVInt(i);
        }

        // Any maintenance ints to send?
        if (!maintIntPosted[i]) {
            if (vid->hcr.En && getMISR(vid)) {
                maintIntPosted[i] = true;
                postMaintInt(i);
            }
        } else {
            if (!vid->hcr.En || !getMISR(vid)) {
                unPostMaintInt(i);
                maintIntPosted[i] = false;
            }
        }
    }
}

AddrRangeList
VGic::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(RangeSize(hvAddr, GICH_REG_SIZE));
    ranges.push_back(RangeSize(vcpuAddr, GICV_SIZE));
    return ranges;
}

/*
 * Checkpointing as of revision 10565:23593fdaadcd (std::ostream-based
 * Serializable interface).
 */
void
VGic::serialize(std::ostream &os)
{
    Tick interrupt_time[VGIC_CPU_MAX];
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        interrupt_time[cpu] = 0;
        if (postVIntEvent[cpu]->scheduled()) {
            interrupt_time[cpu] = postVIntEvent[cpu]->when();
        }
    }

    DPRINTF(Checkpoint, "Serializing VGIC\n");

    SERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    SERIALIZE_SCALAR(vcpuAddr);
    SERIALIZE_SCALAR(hvAddr);
    SERIALIZE_SCALAR(pioDelay);
    SERIALIZE_SCALAR(maintInt);

    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        nameOut(os, csprintf("%s.vcpuData%d", name(), cpu));
        uint32_t vctrl_val = vcpuData[cpu].vctrl;
        SERIALIZE_SCALAR(vctrl_val);
        uint32_t hcr_val = vcpuData[cpu].hcr;
        SERIALIZE_SCALAR(hcr_val);
        uint64_t eisr_val = vcpuData[cpu].eisr;
        SERIALIZE_SCALAR(eisr_val);
        uint8_t VMGrp0En_val = vcpuData[cpu].VMGrp0En;
        SERIALIZE_SCALAR(VMGrp0En_val);
        uint8_t VMGrp1En_val = vcpuData[cpu].VMGrp1En;
        SERIALIZE_SCALAR(VMGrp1En_val);
        uint8_t VMAckCtl_val = vcpuData[cpu].VMAckCtl;
        SERIALIZE_SCALAR(VMAckCtl_val);
        uint8_t VMFiqEn_val = vcpuData[cpu].VMFiqEn;
        SERIALIZE_SCALAR(VMFiqEn_val);
        uint8_t VMCBPR_val = vcpuData[cpu].VMCBPR;
        SERIALIZE_SCALAR(VMCBPR_val);
        uint8_t VEM_val = vcpuData[cpu].VEM;
        SERIALIZE_SCALAR(VEM_val);
        uint8_t VMABP_val = vcpuData[cpu].VMABP;
        SERIALIZE_SCALAR(VMABP_val);
        uint8_t VMBP_val = vcpuData[cpu].VMBP;
        SERIALIZE_SCALAR(VMBP_val);
        uint8_t VMPriMask_val = vcpuData[cpu].VMPriMask;
        SERIALIZE_SCALAR(VMPriMask_val);

        for (int i = 0; i < NUM_LR; i++) {
            uint32_t lr = vcpuData[cpu].LR[i];
            nameOut(os, csprintf("%s.vcpuData%d.LR%d", name(), cpu, i));
            SERIALIZE_SCALAR(lr);
        }
    }
}

void VGic::unserialize(Checkpoint *cp, const std::string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm GIC\n");

    Tick interrupt_time[VGIC_CPU_MAX];
    UNSERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        if (interrupt_time[cpu])
            schedule(postVIntEvent[cpu], interrupt_time[cpu]);

        uint32_t tmp;
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "vctrl_val", tmp);
        vcpuData[cpu].vctrl = tmp;
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "hcr_val", tmp);
        vcpuData[cpu].hcr = tmp;
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "eisr_val", vcpuData[cpu].eisr);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMGrp0En_val", vcpuData[cpu].VMGrp0En);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMGrp1En_val", vcpuData[cpu].VMGrp1En);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMAckCtl_val", vcpuData[cpu].VMAckCtl);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMFiqEn_val", vcpuData[cpu].VMFiqEn);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMCBPR_val", vcpuData[cpu].VMCBPR);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VEM_val", vcpuData[cpu].VEM);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMABP_val", vcpuData[cpu].VMABP);
        paramIn(cp, csprintf("%s.vcpuData%d", section, cpu),
                "VMPriMask_val", vcpuData[cpu].VMPriMask);

        for (int i = 0; i < NUM_LR; i++) {
            paramIn(cp, csprintf("%s.vcpuData%d.LR%d", section, cpu, i),
                    "lr", tmp);
            vcpuData[cpu].LR[i] = tmp;
        }
    }
    UNSERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_SCALAR(vcpuAddr);
    UNSERIALIZE_SCALAR(hvAddr);
    UNSERIALIZE_SCALAR(pioDelay);
    UNSERIALIZE_SCALAR(maintInt);
}

/*
 * Checkpointing as of revision 10905:a6ca6831e775 (CheckpointOut/CheckpointIn
 * interface); the per-vCPU state is handled by vcpuIntData::serialize() and
 * vcpuIntData::unserialize() in their own checkpoint sections.
 */
void
VGic::serialize(CheckpointOut &cp) const
{
    Tick interrupt_time[VGIC_CPU_MAX];
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        interrupt_time[cpu] = 0;
        if (postVIntEvent[cpu]->scheduled()) {
            interrupt_time[cpu] = postVIntEvent[cpu]->when();
        }
    }

    DPRINTF(Checkpoint, "Serializing VGIC\n");

    SERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    SERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    SERIALIZE_SCALAR(vcpuAddr);
    SERIALIZE_SCALAR(hvAddr);
    SERIALIZE_SCALAR(pioDelay);
    SERIALIZE_SCALAR(maintInt);

    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++)
        vcpuData[cpu].serializeSection(cp, csprintf("vcpuData%d", cpu));
}

void
VGic::vcpuIntData::serialize(CheckpointOut &cp) const
{
    uint32_t vctrl_val = vctrl;
    SERIALIZE_SCALAR(vctrl_val);
    uint32_t hcr_val = hcr;
    SERIALIZE_SCALAR(hcr_val);
    uint64_t eisr_val = eisr;
    SERIALIZE_SCALAR(eisr_val);
    uint8_t VMGrp0En_val = VMGrp0En;
    SERIALIZE_SCALAR(VMGrp0En_val);
    uint8_t VMGrp1En_val = VMGrp1En;
    SERIALIZE_SCALAR(VMGrp1En_val);
    uint8_t VMAckCtl_val = VMAckCtl;
    SERIALIZE_SCALAR(VMAckCtl_val);
    uint8_t VMFiqEn_val = VMFiqEn;
    SERIALIZE_SCALAR(VMFiqEn_val);
    uint8_t VMCBPR_val = VMCBPR;
    SERIALIZE_SCALAR(VMCBPR_val);
    uint8_t VEM_val = VEM;
    SERIALIZE_SCALAR(VEM_val);
    uint8_t VMABP_val = VMABP;
    SERIALIZE_SCALAR(VMABP_val);
    uint8_t VMBP_val = VMBP;
    SERIALIZE_SCALAR(VMBP_val);
    uint8_t VMPriMask_val = VMPriMask;
    SERIALIZE_SCALAR(VMPriMask_val);

    for (int i = 0; i < NUM_LR; i++) {
        ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
        paramOut(cp, "lr", LR[i]);
    }
}

void VGic::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm GIC\n");

    Tick interrupt_time[VGIC_CPU_MAX];
    UNSERIALIZE_ARRAY(interrupt_time, VGIC_CPU_MAX);
    for (uint32_t cpu = 0; cpu < VGIC_CPU_MAX; cpu++) {
        if (interrupt_time[cpu])
            schedule(postVIntEvent[cpu], interrupt_time[cpu]);

        vcpuData[cpu].unserializeSection(cp, csprintf("vcpuData%d", cpu));
    }
    UNSERIALIZE_ARRAY(maintIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_ARRAY(vIntPosted, VGIC_CPU_MAX);
    UNSERIALIZE_SCALAR(vcpuAddr);
    UNSERIALIZE_SCALAR(hvAddr);
    UNSERIALIZE_SCALAR(pioDelay);
    UNSERIALIZE_SCALAR(maintInt);
}

void
VGic::vcpuIntData::unserialize(CheckpointIn &cp)
{
    paramIn(cp, "vctrl_val", vctrl);
    paramIn(cp, "hcr_val", hcr);
    paramIn(cp, "eisr_val", eisr);
    paramIn(cp, "VMGrp0En_val", VMGrp0En);
    paramIn(cp, "VMGrp1En_val", VMGrp1En);
    paramIn(cp, "VMAckCtl_val", VMAckCtl);
    paramIn(cp, "VMFiqEn_val", VMFiqEn);
    paramIn(cp, "VMCBPR_val", VMCBPR);
    paramIn(cp, "VEM_val", VEM);
    paramIn(cp, "VMABP_val", VMABP);
    paramIn(cp, "VMPriMask_val", VMPriMask);

    for (int i = 0; i < NUM_LR; i++) {
        ScopedCheckpointSection sec_lr(cp, csprintf("LR%d", i));
        paramIn(cp, "lr", LR[i]);
    }
}

VGic *
VGicParams::create()
{
    return new VGic(this);
}