1/*
2 * Copyright (c) 2010-2016 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Gabe Black
38 * Ali Saidi
39 */
40
41#include "arch/arm/isa.hh"
42#include "arch/arm/pmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/tlb.hh"
45#include "cpu/base.hh"
46#include "cpu/checker/cpu.hh"
47#include "debug/Arm.hh"
48#include "debug/MiscRegs.hh"
49#include "dev/arm/generic_timer.hh"
50#include "params/ArmISA.hh"
51#include "sim/faults.hh"
52#include "sim/stat_control.hh"
53#include "sim/system.hh"
54
55namespace ArmISA
56{
57
58ISA::ISA(Params *p)
59 : SimObject(p),
60 system(NULL),
61 _decoderFlavour(p->decoderFlavour),
62 _vecRegRenameMode(p->vecRegRenameMode),
63 pmu(p->pmu)
64{
65 miscRegs[MISCREG_SCTLR_RST] = 0;
66
67 // Hook up a dummy device if we haven't been configured with a
68 // real PMU. By using a dummy device, we don't need to check that
    // the PMU exists every time we try to access a PMU register.
70 if (!pmu)
71 pmu = &dummyDevice;
72
73 // Give all ISA devices a pointer to this ISA
74 pmu->setISA(this);
75
76 system = dynamic_cast<ArmSystem *>(p->system);
77
78 // Cache system-level properties
79 if (FullSystem && system) {
80 highestELIs64 = system->highestELIs64();
81 haveSecurity = system->haveSecurity();
82 haveLPAE = system->haveLPAE();
83 haveVirtualization = system->haveVirtualization();
84 haveLargeAsid64 = system->haveLargeAsid64();
85 physAddrRange64 = system->physAddrRange64();
86 } else {
87 highestELIs64 = true; // ArmSystem::highestELIs64 does the same
88 haveSecurity = haveLPAE = haveVirtualization = false;
89 haveLargeAsid64 = false;
90 physAddrRange64 = 32; // dummy value
91 }
92
93 initializeMiscRegMetadata();
94 preUnflattenMiscReg();
95
96 clear();
97}
98
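// Static lookup table of per-register metadata (RES0/RES1, RAZ/RAO and
// write-ignore masks) consulted by readMiscRegNoEffect() and
// setMiscRegNoEffect() below.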
99std::vector<struct ISA::MiscRegLUTEntry> ISA::lookUpMiscReg(NUM_MISCREGS);
100
101const ArmISAParams *
102ISA::params() const
103{
104 return dynamic_cast<const Params *>(_params);
105}
106
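// Reset the miscellaneous register file to its power-on state. The
// configured SCTLR reset value is preserved across the memset, and the
// AArch64 path is delegated to clear64() when the highest implemented
// exception level is 64-bit.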
107void
108ISA::clear()
109{
110 const Params *p(params());
111
112 SCTLR sctlr_rst = miscRegs[MISCREG_SCTLR_RST];
113 memset(miscRegs, 0, sizeof(miscRegs));
114
115 // Initialize configurable default values
116 miscRegs[MISCREG_MIDR] = p->midr;
117 miscRegs[MISCREG_MIDR_EL1] = p->midr;
118 miscRegs[MISCREG_VPIDR] = p->midr;
119
120 if (FullSystem && system->highestELIs64()) {
121 // Initialize AArch64 state
122 clear64(p);
123 return;
124 }
125
126 // Initialize AArch32 state...
127
128 CPSR cpsr = 0;
129 cpsr.mode = MODE_USER;
130 miscRegs[MISCREG_CPSR] = cpsr;
131 updateRegMap(cpsr);
132
133 SCTLR sctlr = 0;
134 sctlr.te = (bool) sctlr_rst.te;
135 sctlr.nmfi = (bool) sctlr_rst.nmfi;
136 sctlr.v = (bool) sctlr_rst.v;
137 sctlr.u = 1;
138 sctlr.xp = 1;
139 sctlr.rao2 = 1;
140 sctlr.rao3 = 1;
141 sctlr.rao4 = 0xf; // SCTLR[6:3]
142 sctlr.uci = 1;
143 sctlr.dze = 1;
144 miscRegs[MISCREG_SCTLR_NS] = sctlr;
145 miscRegs[MISCREG_SCTLR_RST] = sctlr_rst;
146 miscRegs[MISCREG_HCPTR] = 0;
147
148 // Start with an event in the mailbox
149 miscRegs[MISCREG_SEV_MAILBOX] = 1;
150
151 // Separate Instruction and Data TLBs
152 miscRegs[MISCREG_TLBTR] = 1;
153
154 MVFR0 mvfr0 = 0;
155 mvfr0.advSimdRegisters = 2;
156 mvfr0.singlePrecision = 2;
157 mvfr0.doublePrecision = 2;
158 mvfr0.vfpExceptionTrapping = 0;
159 mvfr0.divide = 1;
160 mvfr0.squareRoot = 1;
161 mvfr0.shortVectors = 1;
162 mvfr0.roundingModes = 1;
163 miscRegs[MISCREG_MVFR0] = mvfr0;
164
165 MVFR1 mvfr1 = 0;
166 mvfr1.flushToZero = 1;
167 mvfr1.defaultNaN = 1;
168 mvfr1.advSimdLoadStore = 1;
169 mvfr1.advSimdInteger = 1;
170 mvfr1.advSimdSinglePrecision = 1;
171 mvfr1.advSimdHalfPrecision = 1;
172 mvfr1.vfpHalfPrecision = 1;
173 miscRegs[MISCREG_MVFR1] = mvfr1;
174
175 // Reset values of PRRR and NMRR are implementation dependent
176
177 // @todo: PRRR and NMRR in secure state?
178 miscRegs[MISCREG_PRRR_NS] =
179 (1 << 19) | // 19
180 (0 << 18) | // 18
181 (0 << 17) | // 17
182 (1 << 16) | // 16
183 (2 << 14) | // 15:14
184 (0 << 12) | // 13:12
185 (2 << 10) | // 11:10
186 (2 << 8) | // 9:8
187 (2 << 6) | // 7:6
188 (2 << 4) | // 5:4
189 (1 << 2) | // 3:2
190 0; // 1:0
191 miscRegs[MISCREG_NMRR_NS] =
192 (1 << 30) | // 31:30
193 (0 << 26) | // 27:26
194 (0 << 24) | // 25:24
195 (3 << 22) | // 23:22
196 (2 << 20) | // 21:20
197 (0 << 18) | // 19:18
198 (0 << 16) | // 17:16
199 (1 << 14) | // 15:14
200 (0 << 12) | // 13:12
201 (2 << 10) | // 11:10
202 (0 << 8) | // 9:8
203 (3 << 6) | // 7:6
204 (2 << 4) | // 5:4
205 (0 << 2) | // 3:2
206 0; // 1:0
207
208 miscRegs[MISCREG_CPACR] = 0;
209
210 miscRegs[MISCREG_ID_MMFR0] = p->id_mmfr0;
211 miscRegs[MISCREG_ID_MMFR1] = p->id_mmfr1;
212 miscRegs[MISCREG_ID_MMFR2] = p->id_mmfr2;
213 miscRegs[MISCREG_ID_MMFR3] = p->id_mmfr3;
214
215 miscRegs[MISCREG_ID_ISAR0] = p->id_isar0;
216 miscRegs[MISCREG_ID_ISAR1] = p->id_isar1;
217 miscRegs[MISCREG_ID_ISAR2] = p->id_isar2;
218 miscRegs[MISCREG_ID_ISAR3] = p->id_isar3;
219 miscRegs[MISCREG_ID_ISAR4] = p->id_isar4;
220 miscRegs[MISCREG_ID_ISAR5] = p->id_isar5;
221
222 miscRegs[MISCREG_FPSID] = p->fpsid;
223
224 if (haveLPAE) {
225 TTBCR ttbcr = miscRegs[MISCREG_TTBCR_NS];
226 ttbcr.eae = 0;
227 miscRegs[MISCREG_TTBCR_NS] = ttbcr;
228 // Enforce consistency with system-level settings
229 miscRegs[MISCREG_ID_MMFR0] = (miscRegs[MISCREG_ID_MMFR0] & ~0xf) | 0x5;
230 }
231
232 if (haveSecurity) {
233 miscRegs[MISCREG_SCTLR_S] = sctlr;
234 miscRegs[MISCREG_SCR] = 0;
235 miscRegs[MISCREG_VBAR_S] = 0;
236 } else {
237 // we're always non-secure
238 miscRegs[MISCREG_SCR] = 1;
239 }
240
241 //XXX We need to initialize the rest of the state.
242}
243
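// AArch64-specific part of the reset: select the initial mode and stack
// pointer for the highest implemented EL, set the corresponding RVBAR_ELx,
// and initialize the 64-bit ID and control registers from the parameters.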
244void
245ISA::clear64(const ArmISAParams *p)
246{
247 CPSR cpsr = 0;
248 Addr rvbar = system->resetAddr64();
249 switch (system->highestEL()) {
250 // Set initial EL to highest implemented EL using associated stack
251 // pointer (SP_ELx); set RVBAR_ELx to implementation defined reset
252 // value
253 case EL3:
254 cpsr.mode = MODE_EL3H;
255 miscRegs[MISCREG_RVBAR_EL3] = rvbar;
256 break;
257 case EL2:
258 cpsr.mode = MODE_EL2H;
259 miscRegs[MISCREG_RVBAR_EL2] = rvbar;
260 break;
261 case EL1:
262 cpsr.mode = MODE_EL1H;
263 miscRegs[MISCREG_RVBAR_EL1] = rvbar;
264 break;
265 default:
266 panic("Invalid highest implemented exception level");
267 break;
268 }
269
270 // Initialize rest of CPSR
271 cpsr.daif = 0xf; // Mask all interrupts
272 cpsr.ss = 0;
273 cpsr.il = 0;
274 miscRegs[MISCREG_CPSR] = cpsr;
275 updateRegMap(cpsr);
276
277 // Initialize other control registers
278 miscRegs[MISCREG_MPIDR_EL1] = 0x80000000;
279 if (haveSecurity) {
280 miscRegs[MISCREG_SCTLR_EL3] = 0x30c50830;
281 miscRegs[MISCREG_SCR_EL3] = 0x00000030; // RES1 fields
282 } else if (haveVirtualization) {
283 // also MISCREG_SCTLR_EL2 (by mapping)
284 miscRegs[MISCREG_HSCTLR] = 0x30c50830;
285 } else {
286 // also MISCREG_SCTLR_EL1 (by mapping)
287 miscRegs[MISCREG_SCTLR_NS] = 0x30d00800 | 0x00050030; // RES1 | init
288 // Always non-secure
289 miscRegs[MISCREG_SCR_EL3] = 1;
290 }
291
292 // Initialize configurable id registers
293 miscRegs[MISCREG_ID_AA64AFR0_EL1] = p->id_aa64afr0_el1;
294 miscRegs[MISCREG_ID_AA64AFR1_EL1] = p->id_aa64afr1_el1;
295 miscRegs[MISCREG_ID_AA64DFR0_EL1] =
296 (p->id_aa64dfr0_el1 & 0xfffffffffffff0ffULL) |
297 (p->pmu ? 0x0000000000000100ULL : 0); // Enable PMUv3
298
299 miscRegs[MISCREG_ID_AA64DFR1_EL1] = p->id_aa64dfr1_el1;
300 miscRegs[MISCREG_ID_AA64ISAR0_EL1] = p->id_aa64isar0_el1;
301 miscRegs[MISCREG_ID_AA64ISAR1_EL1] = p->id_aa64isar1_el1;
302 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = p->id_aa64mmfr0_el1;
303 miscRegs[MISCREG_ID_AA64MMFR1_EL1] = p->id_aa64mmfr1_el1;
304
305 miscRegs[MISCREG_ID_DFR0_EL1] =
306 (p->pmu ? 0x03000000ULL : 0); // Enable PMUv3
307
308 miscRegs[MISCREG_ID_DFR0] = miscRegs[MISCREG_ID_DFR0_EL1];
309
310 // Enforce consistency with system-level settings...
311
312 // EL3
313 miscRegs[MISCREG_ID_AA64PFR0_EL1] = insertBits(
314 miscRegs[MISCREG_ID_AA64PFR0_EL1], 15, 12,
315 haveSecurity ? 0x2 : 0x0);
316 // EL2
317 miscRegs[MISCREG_ID_AA64PFR0_EL1] = insertBits(
318 miscRegs[MISCREG_ID_AA64PFR0_EL1], 11, 8,
319 haveVirtualization ? 0x2 : 0x0);
320 // Large ASID support
321 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = insertBits(
322 miscRegs[MISCREG_ID_AA64MMFR0_EL1], 7, 4,
323 haveLargeAsid64 ? 0x2 : 0x0);
324 // Physical address size
325 miscRegs[MISCREG_ID_AA64MMFR0_EL1] = insertBits(
326 miscRegs[MISCREG_ID_AA64MMFR0_EL1], 3, 0,
327 encodePhysAddrRange64(physAddrRange64));
328}
329
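// Side-effect-free read: fetches the raw value (combining the two 32-bit
// backing entries for 64-bit registers) and enforces the architectural
// RAZ/RAO masks of the requested register.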
330MiscReg
331ISA::readMiscRegNoEffect(int misc_reg) const
332{
333 assert(misc_reg < NumMiscRegs);
334
335 const auto &reg = lookUpMiscReg[misc_reg]; // bit masks
336 const auto &map = getMiscIndices(misc_reg);
337 int lower = map.first, upper = map.second;
338 // NB!: apply architectural masks according to desired register,
339 // despite possibly getting value from different (mapped) register.
340 auto val = !upper ? miscRegs[lower] : ((miscRegs[lower] & mask(32))
341 |(miscRegs[upper] << 32));
342 if (val & reg.res0()) {
343 DPRINTF(MiscRegs, "Reading MiscReg %s with set res0 bits: %#x\n",
344 miscRegName[misc_reg], val & reg.res0());
345 }
346 if ((val & reg.res1()) != reg.res1()) {
347 DPRINTF(MiscRegs, "Reading MiscReg %s with clear res1 bits: %#x\n",
348 miscRegName[misc_reg], (val & reg.res1()) ^ reg.res1());
349 }
350 return (val & ~reg.raz()) | reg.rao(); // enforce raz/rao
351}
352
353
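// Full register read: resolves banked and aliased registers, applies
// security-state restrictions, and delegates PMU and Generic Timer
// registers to their ISA devices before falling back to
// readMiscRegNoEffect().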
354MiscReg
355ISA::readMiscReg(int misc_reg, ThreadContext *tc)
356{
357 CPSR cpsr = 0;
358 PCState pc = 0;
359 SCR scr = 0;
360
361 if (misc_reg == MISCREG_CPSR) {
362 cpsr = miscRegs[misc_reg];
363 pc = tc->pcState();
364 cpsr.j = pc.jazelle() ? 1 : 0;
365 cpsr.t = pc.thumb() ? 1 : 0;
366 return cpsr;
367 }
368
369#ifndef NDEBUG
370 if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) {
371 if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL])
372 warn("Unimplemented system register %s read.\n",
373 miscRegName[misc_reg]);
374 else
375 panic("Unimplemented system register %s read.\n",
376 miscRegName[misc_reg]);
377 }
378#endif
379
380 switch (unflattenMiscReg(misc_reg)) {
381 case MISCREG_HCR:
382 {
383 if (!haveVirtualization)
384 return 0;
385 else
386 return readMiscRegNoEffect(MISCREG_HCR);
387 }
388 case MISCREG_CPACR:
389 {
390 const uint32_t ones = (uint32_t)(-1);
391 CPACR cpacrMask = 0;
392 // Only cp10, cp11, and ase are implemented, nothing else should
393 // be readable? (straight copy from the write code)
394 cpacrMask.cp10 = ones;
395 cpacrMask.cp11 = ones;
396 cpacrMask.asedis = ones;
397
398 // Security Extensions may limit the readability of CPACR
399 if (haveSecurity) {
400 scr = readMiscRegNoEffect(MISCREG_SCR);
401 cpsr = readMiscRegNoEffect(MISCREG_CPSR);
402 if (scr.ns && (cpsr.mode != MODE_MON)) {
403 NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR);
404 // NB: Skipping the full loop, here
405 if (!nsacr.cp10) cpacrMask.cp10 = 0;
406 if (!nsacr.cp11) cpacrMask.cp11 = 0;
407 }
408 }
409 MiscReg val = readMiscRegNoEffect(MISCREG_CPACR);
410 val &= cpacrMask;
411 DPRINTF(MiscRegs, "Reading misc reg %s: %#x\n",
412 miscRegName[misc_reg], val);
413 return val;
414 }
415 case MISCREG_MPIDR:
416 cpsr = readMiscRegNoEffect(MISCREG_CPSR);
417 scr = readMiscRegNoEffect(MISCREG_SCR);
418 if ((cpsr.mode == MODE_HYP) || inSecureState(scr, cpsr)) {
419 return getMPIDR(system, tc);
420 } else {
421 return readMiscReg(MISCREG_VMPIDR, tc);
422 }
423 break;
424 case MISCREG_MPIDR_EL1:
425 // @todo in the absence of v8 virtualization support just return MPIDR_EL1
426 return getMPIDR(system, tc) & 0xffffffff;
427 case MISCREG_VMPIDR:
428 // top bit defined as RES1
429 return readMiscRegNoEffect(misc_reg) | 0x80000000;
430 case MISCREG_ID_AFR0: // not implemented, so alias MIDR
431 case MISCREG_REVIDR: // not implemented, so alias MIDR
432 case MISCREG_MIDR:
433 cpsr = readMiscRegNoEffect(MISCREG_CPSR);
434 scr = readMiscRegNoEffect(MISCREG_SCR);
435 if ((cpsr.mode == MODE_HYP) || inSecureState(scr, cpsr)) {
436 return readMiscRegNoEffect(misc_reg);
437 } else {
438 return readMiscRegNoEffect(MISCREG_VPIDR);
439 }
440 break;
441 case MISCREG_JOSCR: // Jazelle trivial implementation, RAZ/WI
442 case MISCREG_JMCR: // Jazelle trivial implementation, RAZ/WI
443 case MISCREG_JIDR: // Jazelle trivial implementation, RAZ/WI
444 case MISCREG_AIDR: // AUX ID set to 0
445 case MISCREG_TCMTR: // No TCM's
446 return 0;
447
448 case MISCREG_CLIDR:
449 warn_once("The clidr register always reports 0 caches.\n");
450 warn_once("clidr LoUIS field of 0b001 to match current "
451 "ARM implementations.\n");
452 return 0x00200000;
453 case MISCREG_CCSIDR:
454 warn_once("The ccsidr register isn't implemented and "
455 "always reads as 0.\n");
456 break;
457 case MISCREG_CTR: // AArch32, ARMv7, top bit set
458 case MISCREG_CTR_EL0: // AArch64
459 {
460 //all caches have the same line size in gem5
461 //4 byte words in ARM
462 unsigned lineSizeWords =
463 tc->getSystemPtr()->cacheLineSize() / 4;
464 unsigned log2LineSizeWords = 0;
465
466 while (lineSizeWords >>= 1) {
467 ++log2LineSizeWords;
468 }
469
470 CTR ctr = 0;
            //log2 of minimum i-cache line size (words)
472 ctr.iCacheLineSize = log2LineSizeWords;
473 //b11 - gem5 uses pipt
474 ctr.l1IndexPolicy = 0x3;
475 //log2 of minimum d-cache line size (words)
476 ctr.dCacheLineSize = log2LineSizeWords;
477 //log2 of max reservation size (words)
478 ctr.erg = log2LineSizeWords;
479 //log2 of max writeback size (words)
480 ctr.cwg = log2LineSizeWords;
481 //b100 - gem5 format is ARMv7
482 ctr.format = 0x4;
483
484 return ctr;
485 }
486 case MISCREG_ACTLR:
487 warn("Not doing anything for miscreg ACTLR\n");
488 break;
489
490 case MISCREG_PMXEVTYPER_PMCCFILTR:
491 case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0:
492 case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0:
493 case MISCREG_PMCR ... MISCREG_PMOVSSET:
494 return pmu->readMiscReg(misc_reg);
495
496 case MISCREG_CPSR_Q:
497 panic("shouldn't be reading this register seperately\n");
498 case MISCREG_FPSCR_QC:
499 return readMiscRegNoEffect(MISCREG_FPSCR) & ~FpscrQcMask;
500 case MISCREG_FPSCR_EXC:
501 return readMiscRegNoEffect(MISCREG_FPSCR) & ~FpscrExcMask;
502 case MISCREG_FPSR:
503 {
504 const uint32_t ones = (uint32_t)(-1);
505 FPSCR fpscrMask = 0;
506 fpscrMask.ioc = ones;
507 fpscrMask.dzc = ones;
508 fpscrMask.ofc = ones;
509 fpscrMask.ufc = ones;
510 fpscrMask.ixc = ones;
511 fpscrMask.idc = ones;
512 fpscrMask.qc = ones;
513 fpscrMask.v = ones;
514 fpscrMask.c = ones;
515 fpscrMask.z = ones;
516 fpscrMask.n = ones;
517 return readMiscRegNoEffect(MISCREG_FPSCR) & (uint32_t)fpscrMask;
518 }
519 case MISCREG_FPCR:
520 {
521 const uint32_t ones = (uint32_t)(-1);
522 FPSCR fpscrMask = 0;
523 fpscrMask.ioe = ones;
524 fpscrMask.dze = ones;
525 fpscrMask.ofe = ones;
526 fpscrMask.ufe = ones;
527 fpscrMask.ixe = ones;
528 fpscrMask.ide = ones;
529 fpscrMask.len = ones;
530 fpscrMask.stride = ones;
531 fpscrMask.rMode = ones;
532 fpscrMask.fz = ones;
533 fpscrMask.dn = ones;
534 fpscrMask.ahp = ones;
535 return readMiscRegNoEffect(MISCREG_FPSCR) & (uint32_t)fpscrMask;
536 }
537 case MISCREG_NZCV:
538 {
539 CPSR cpsr = 0;
540 cpsr.nz = tc->readCCReg(CCREG_NZ);
541 cpsr.c = tc->readCCReg(CCREG_C);
542 cpsr.v = tc->readCCReg(CCREG_V);
543 return cpsr;
544 }
545 case MISCREG_DAIF:
546 {
547 CPSR cpsr = 0;
548 cpsr.daif = (uint8_t) ((CPSR) miscRegs[MISCREG_CPSR]).daif;
549 return cpsr;
550 }
551 case MISCREG_SP_EL0:
552 {
553 return tc->readIntReg(INTREG_SP0);
554 }
555 case MISCREG_SP_EL1:
556 {
557 return tc->readIntReg(INTREG_SP1);
558 }
559 case MISCREG_SP_EL2:
560 {
561 return tc->readIntReg(INTREG_SP2);
562 }
563 case MISCREG_SPSEL:
564 {
565 return miscRegs[MISCREG_CPSR] & 0x1;
566 }
567 case MISCREG_CURRENTEL:
568 {
569 return miscRegs[MISCREG_CPSR] & 0xc;
570 }
571 case MISCREG_L2CTLR:
572 {
573 // mostly unimplemented, just set NumCPUs field from sim and return
574 L2CTLR l2ctlr = 0;
575 // b00:1CPU to b11:4CPUs
576 l2ctlr.numCPUs = tc->getSystemPtr()->numContexts() - 1;
577 return l2ctlr;
578 }
579 case MISCREG_DBGDIDR:
580 /* For now just implement the version number.
581 * ARMv7, v7.1 Debug architecture (0b0101 --> 0x5)
582 */
583 return 0x5 << 16;
584 case MISCREG_DBGDSCRint:
585 return 0;
586 case MISCREG_ISR:
587 return tc->getCpuPtr()->getInterruptController(tc->threadId())->getISR(
588 readMiscRegNoEffect(MISCREG_HCR),
589 readMiscRegNoEffect(MISCREG_CPSR),
590 readMiscRegNoEffect(MISCREG_SCR));
591 case MISCREG_ISR_EL1:
592 return tc->getCpuPtr()->getInterruptController(tc->threadId())->getISR(
593 readMiscRegNoEffect(MISCREG_HCR_EL2),
594 readMiscRegNoEffect(MISCREG_CPSR),
595 readMiscRegNoEffect(MISCREG_SCR_EL3));
596 case MISCREG_DCZID_EL0:
597 return 0x04; // DC ZVA clear 64-byte chunks
598 case MISCREG_HCPTR:
599 {
600 MiscReg val = readMiscRegNoEffect(misc_reg);
601 // The trap bit associated with CP14 is defined as RAZ
602 val &= ~(1 << 14);
603 // If a CP bit in NSACR is 0 then the corresponding bit in
604 // HCPTR is RAO/WI
605 bool secure_lookup = haveSecurity &&
606 inSecureState(readMiscRegNoEffect(MISCREG_SCR),
607 readMiscRegNoEffect(MISCREG_CPSR));
608 if (!secure_lookup) {
609 MiscReg mask = readMiscRegNoEffect(MISCREG_NSACR);
610 val |= (mask ^ 0x7FFF) & 0xBFFF;
611 }
612 // Set the bits for unimplemented coprocessors to RAO/WI
613 val |= 0x33FF;
614 return (val);
615 }
616 case MISCREG_HDFAR: // alias for secure DFAR
617 return readMiscRegNoEffect(MISCREG_DFAR_S);
618 case MISCREG_HIFAR: // alias for secure IFAR
619 return readMiscRegNoEffect(MISCREG_IFAR_S);
620 case MISCREG_HVBAR: // bottom bits reserved
621 return readMiscRegNoEffect(MISCREG_HVBAR) & 0xFFFFFFE0;
622 case MISCREG_SCTLR:
623 return (readMiscRegNoEffect(misc_reg) & 0x72DD39FF) | 0x00C00818;
624 case MISCREG_SCTLR_EL1:
625 return (readMiscRegNoEffect(misc_reg) & 0x37DDDBBF) | 0x30D00800;
626 case MISCREG_SCTLR_EL2:
627 case MISCREG_SCTLR_EL3:
628 case MISCREG_HSCTLR:
629 return (readMiscRegNoEffect(misc_reg) & 0x32CD183F) | 0x30C50830;
630
631 case MISCREG_ID_PFR0:
632 // !ThumbEE | !Jazelle | Thumb | ARM
633 return 0x00000031;
634 case MISCREG_ID_PFR1:
        { // Timer | Virt | !M Profile | TrustZone | ARMv4
636 bool haveTimer = (system->getGenericTimer() != NULL);
637 return 0x00000001
638 | (haveSecurity ? 0x00000010 : 0x0)
639 | (haveVirtualization ? 0x00001000 : 0x0)
640 | (haveTimer ? 0x00010000 : 0x0);
641 }
642 case MISCREG_ID_AA64PFR0_EL1:
643 return 0x0000000000000002 // AArch{64,32} supported at EL0
644 | 0x0000000000000020 // EL1
645 | (haveVirtualization ? 0x0000000000000200 : 0) // EL2
646 | (haveSecurity ? 0x0000000000002000 : 0); // EL3
647 case MISCREG_ID_AA64PFR1_EL1:
648 return 0; // bits [63:0] RES0 (reserved for future use)
649
650 // Generic Timer registers
651 case MISCREG_CNTFRQ ... MISCREG_CNTHP_CTL:
652 case MISCREG_CNTPCT ... MISCREG_CNTHP_CVAL:
653 case MISCREG_CNTKCTL_EL1 ... MISCREG_CNTV_CVAL_EL0:
654 case MISCREG_CNTVOFF_EL2 ... MISCREG_CNTPS_CVAL_EL1:
655 return getGenericTimer(tc).readMiscReg(misc_reg);
656
657 default:
658 break;
659
660 }
661 return readMiscRegNoEffect(misc_reg);
662}
663
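// Side-effect-free write: applies the write-ignore/RAO masks and splits
// 64-bit values across the two 32-bit backing entries where needed.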
664void
665ISA::setMiscRegNoEffect(int misc_reg, const MiscReg &val)
666{
667 assert(misc_reg < NumMiscRegs);
668
669 const auto &reg = lookUpMiscReg[misc_reg]; // bit masks
670 const auto &map = getMiscIndices(misc_reg);
671 int lower = map.first, upper = map.second;
672
673 auto v = (val & ~reg.wi()) | reg.rao();
674 if (upper > 0) {
675 miscRegs[lower] = bits(v, 31, 0);
676 miscRegs[upper] = bits(v, 63, 32);
677 DPRINTF(MiscRegs, "Writing to misc reg %d (%d:%d) : %#x\n",
678 misc_reg, lower, upper, v);
679 } else {
680 miscRegs[lower] = v;
681 DPRINTF(MiscRegs, "Writing to misc reg %d (%d) : %#x\n",
682 misc_reg, lower, v);
683 }
684}
685
686namespace {
687
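// The thread context hands out BaseTLB pointers; downcast them to the
// ArmISA::TLB type so the ISA-specific flush operations can be invoked.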
688template<typename T>
689TLB *
690getITBPtr(T *tc)
691{
692 auto tlb = dynamic_cast<TLB *>(tc->getITBPtr());
693 assert(tlb);
694 return tlb;
695}
696
697template<typename T>
698TLB *
699getDTBPtr(T *tc)
700{
701 auto tlb = dynamic_cast<TLB *>(tc->getDTBPtr());
702 assert(tlb);
703 return tlb;
704}
705
706} // anonymous namespace
707
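// Full register write: handles CPSR updates (including invalidating cached
// TLB lookup state), register aliases, TLB maintenance operations, address
// translation (AT*) operations and ISA devices such as the PMU and Generic
// Timer, before committing the value with setMiscRegNoEffect().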
708void
709ISA::setMiscReg(int misc_reg, const MiscReg &val, ThreadContext *tc)
710{
711
712 MiscReg newVal = val;
    bool secure_lookup;
    bool hyp;
    uint8_t target_el;
    uint16_t asid;
    SCR scr;
718
719 if (misc_reg == MISCREG_CPSR) {
720 updateRegMap(val);
721
722
723 CPSR old_cpsr = miscRegs[MISCREG_CPSR];
724 int old_mode = old_cpsr.mode;
725 CPSR cpsr = val;
726 if (old_mode != cpsr.mode || cpsr.il != old_cpsr.il) {
727 getITBPtr(tc)->invalidateMiscReg();
728 getDTBPtr(tc)->invalidateMiscReg();
729 }
730
731 DPRINTF(Arm, "Updating CPSR from %#x to %#x f:%d i:%d a:%d mode:%#x\n",
732 miscRegs[misc_reg], cpsr, cpsr.f, cpsr.i, cpsr.a, cpsr.mode);
733 PCState pc = tc->pcState();
734 pc.nextThumb(cpsr.t);
735 pc.nextJazelle(cpsr.j);
736
737 // Follow slightly different semantics if a CheckerCPU object
738 // is connected
739 CheckerCPU *checker = tc->getCheckerCpuPtr();
740 if (checker) {
741 tc->pcStateNoRecord(pc);
742 } else {
743 tc->pcState(pc);
744 }
745 } else {
746#ifndef NDEBUG
747 if (!miscRegInfo[misc_reg][MISCREG_IMPLEMENTED]) {
748 if (miscRegInfo[misc_reg][MISCREG_WARN_NOT_FAIL])
749 warn("Unimplemented system register %s write with %#x.\n",
750 miscRegName[misc_reg], val);
751 else
752 panic("Unimplemented system register %s write with %#x.\n",
753 miscRegName[misc_reg], val);
754 }
755#endif
756 switch (unflattenMiscReg(misc_reg)) {
757 case MISCREG_CPACR:
758 {
759
760 const uint32_t ones = (uint32_t)(-1);
761 CPACR cpacrMask = 0;
762 // Only cp10, cp11, and ase are implemented, nothing else should
763 // be writable
764 cpacrMask.cp10 = ones;
765 cpacrMask.cp11 = ones;
766 cpacrMask.asedis = ones;
767
768 // Security Extensions may limit the writability of CPACR
769 if (haveSecurity) {
770 scr = readMiscRegNoEffect(MISCREG_SCR);
771 CPSR cpsr = readMiscRegNoEffect(MISCREG_CPSR);
772 if (scr.ns && (cpsr.mode != MODE_MON)) {
773 NSACR nsacr = readMiscRegNoEffect(MISCREG_NSACR);
774 // NB: Skipping the full loop, here
775 if (!nsacr.cp10) cpacrMask.cp10 = 0;
776 if (!nsacr.cp11) cpacrMask.cp11 = 0;
777 }
778 }
779
780 MiscReg old_val = readMiscRegNoEffect(MISCREG_CPACR);
781 newVal &= cpacrMask;
782 newVal |= old_val & ~cpacrMask;
783 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
784 miscRegName[misc_reg], newVal);
785 }
786 break;
787 case MISCREG_CPACR_EL1:
788 {
789 const uint32_t ones = (uint32_t)(-1);
790 CPACR cpacrMask = 0;
791 cpacrMask.tta = ones;
792 cpacrMask.fpen = ones;
793 newVal &= cpacrMask;
794 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
795 miscRegName[misc_reg], newVal);
796 }
797 break;
798 case MISCREG_CPTR_EL2:
799 {
800 const uint32_t ones = (uint32_t)(-1);
801 CPTR cptrMask = 0;
802 cptrMask.tcpac = ones;
803 cptrMask.tta = ones;
804 cptrMask.tfp = ones;
805 newVal &= cptrMask;
806 cptrMask = 0;
807 cptrMask.res1_13_12_el2 = ones;
808 cptrMask.res1_9_0_el2 = ones;
809 newVal |= cptrMask;
810 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
811 miscRegName[misc_reg], newVal);
812 }
813 break;
814 case MISCREG_CPTR_EL3:
815 {
816 const uint32_t ones = (uint32_t)(-1);
817 CPTR cptrMask = 0;
818 cptrMask.tcpac = ones;
819 cptrMask.tta = ones;
820 cptrMask.tfp = ones;
821 newVal &= cptrMask;
822 DPRINTF(MiscRegs, "Writing misc reg %s: %#x\n",
823 miscRegName[misc_reg], newVal);
824 }
825 break;
826 case MISCREG_CSSELR:
827 warn_once("The csselr register isn't implemented.\n");
828 return;
829
830 case MISCREG_DC_ZVA_Xt:
831 warn("Calling DC ZVA! Not Implemeted! Expect WEIRD results\n");
832 return;
833
834 case MISCREG_FPSCR:
835 {
836 const uint32_t ones = (uint32_t)(-1);
837 FPSCR fpscrMask = 0;
838 fpscrMask.ioc = ones;
839 fpscrMask.dzc = ones;
840 fpscrMask.ofc = ones;
841 fpscrMask.ufc = ones;
842 fpscrMask.ixc = ones;
843 fpscrMask.idc = ones;
844 fpscrMask.ioe = ones;
845 fpscrMask.dze = ones;
846 fpscrMask.ofe = ones;
847 fpscrMask.ufe = ones;
848 fpscrMask.ixe = ones;
849 fpscrMask.ide = ones;
850 fpscrMask.len = ones;
851 fpscrMask.stride = ones;
852 fpscrMask.rMode = ones;
853 fpscrMask.fz = ones;
854 fpscrMask.dn = ones;
855 fpscrMask.ahp = ones;
856 fpscrMask.qc = ones;
857 fpscrMask.v = ones;
858 fpscrMask.c = ones;
859 fpscrMask.z = ones;
860 fpscrMask.n = ones;
861 newVal = (newVal & (uint32_t)fpscrMask) |
862 (readMiscRegNoEffect(MISCREG_FPSCR) &
863 ~(uint32_t)fpscrMask);
864 tc->getDecoderPtr()->setContext(newVal);
865 }
866 break;
867 case MISCREG_FPSR:
868 {
869 const uint32_t ones = (uint32_t)(-1);
870 FPSCR fpscrMask = 0;
871 fpscrMask.ioc = ones;
872 fpscrMask.dzc = ones;
873 fpscrMask.ofc = ones;
874 fpscrMask.ufc = ones;
875 fpscrMask.ixc = ones;
876 fpscrMask.idc = ones;
877 fpscrMask.qc = ones;
878 fpscrMask.v = ones;
879 fpscrMask.c = ones;
880 fpscrMask.z = ones;
881 fpscrMask.n = ones;
882 newVal = (newVal & (uint32_t)fpscrMask) |
883 (readMiscRegNoEffect(MISCREG_FPSCR) &
884 ~(uint32_t)fpscrMask);
885 misc_reg = MISCREG_FPSCR;
886 }
887 break;
888 case MISCREG_FPCR:
889 {
890 const uint32_t ones = (uint32_t)(-1);
891 FPSCR fpscrMask = 0;
892 fpscrMask.ioe = ones;
893 fpscrMask.dze = ones;
894 fpscrMask.ofe = ones;
895 fpscrMask.ufe = ones;
896 fpscrMask.ixe = ones;
897 fpscrMask.ide = ones;
898 fpscrMask.len = ones;
899 fpscrMask.stride = ones;
900 fpscrMask.rMode = ones;
901 fpscrMask.fz = ones;
902 fpscrMask.dn = ones;
903 fpscrMask.ahp = ones;
904 newVal = (newVal & (uint32_t)fpscrMask) |
905 (readMiscRegNoEffect(MISCREG_FPSCR) &
906 ~(uint32_t)fpscrMask);
907 misc_reg = MISCREG_FPSCR;
908 }
909 break;
910 case MISCREG_CPSR_Q:
911 {
912 assert(!(newVal & ~CpsrMaskQ));
913 newVal = readMiscRegNoEffect(MISCREG_CPSR) | newVal;
914 misc_reg = MISCREG_CPSR;
915 }
916 break;
917 case MISCREG_FPSCR_QC:
918 {
919 newVal = readMiscRegNoEffect(MISCREG_FPSCR) |
920 (newVal & FpscrQcMask);
921 misc_reg = MISCREG_FPSCR;
922 }
923 break;
924 case MISCREG_FPSCR_EXC:
925 {
926 newVal = readMiscRegNoEffect(MISCREG_FPSCR) |
927 (newVal & FpscrExcMask);
928 misc_reg = MISCREG_FPSCR;
929 }
930 break;
931 case MISCREG_FPEXC:
932 {
                // VFPv3 architecture, section B.6.1 of ARM DDI 0406
934 // bit 29 - valid only if fpexc[31] is 0
935 const uint32_t fpexcMask = 0x60000000;
936 newVal = (newVal & fpexcMask) |
937 (readMiscRegNoEffect(MISCREG_FPEXC) & ~fpexcMask);
938 }
939 break;
940 case MISCREG_HCR:
941 {
942 if (!haveVirtualization)
943 return;
944 }
945 break;
946 case MISCREG_IFSR:
947 {
948 // ARM ARM (ARM DDI 0406C.b) B4.1.96
949 const uint32_t ifsrMask =
950 mask(31, 13) | mask(11, 11) | mask(8, 6);
951 newVal = newVal & ~ifsrMask;
952 }
953 break;
954 case MISCREG_DFSR:
955 {
956 // ARM ARM (ARM DDI 0406C.b) B4.1.52
957 const uint32_t dfsrMask = mask(31, 14) | mask(8, 8);
958 newVal = newVal & ~dfsrMask;
959 }
960 break;
961 case MISCREG_AMAIR0:
962 case MISCREG_AMAIR1:
963 {
964 // ARM ARM (ARM DDI 0406C.b) B4.1.5
965 // Valid only with LPAE
966 if (!haveLPAE)
967 return;
968 DPRINTF(MiscRegs, "Writing AMAIR: %#x\n", newVal);
969 }
970 break;
971 case MISCREG_SCR:
972 getITBPtr(tc)->invalidateMiscReg();
973 getDTBPtr(tc)->invalidateMiscReg();
974 break;
975 case MISCREG_SCTLR:
976 {
977 DPRINTF(MiscRegs, "Writing SCTLR: %#x\n", newVal);
978 scr = readMiscRegNoEffect(MISCREG_SCR);
979 MiscRegIndex sctlr_idx = (haveSecurity && !scr.ns)
980 ? MISCREG_SCTLR_S : MISCREG_SCTLR_NS;
981 SCTLR sctlr = miscRegs[sctlr_idx];
982 SCTLR new_sctlr = newVal;
983 new_sctlr.nmfi = ((bool)sctlr.nmfi) && !haveVirtualization;
984 miscRegs[sctlr_idx] = (MiscReg)new_sctlr;
985 getITBPtr(tc)->invalidateMiscReg();
986 getDTBPtr(tc)->invalidateMiscReg();
987 }
988 case MISCREG_MIDR:
989 case MISCREG_ID_PFR0:
990 case MISCREG_ID_PFR1:
991 case MISCREG_ID_DFR0:
992 case MISCREG_ID_MMFR0:
993 case MISCREG_ID_MMFR1:
994 case MISCREG_ID_MMFR2:
995 case MISCREG_ID_MMFR3:
996 case MISCREG_ID_ISAR0:
997 case MISCREG_ID_ISAR1:
998 case MISCREG_ID_ISAR2:
999 case MISCREG_ID_ISAR3:
1000 case MISCREG_ID_ISAR4:
1001 case MISCREG_ID_ISAR5:
1002
1003 case MISCREG_MPIDR:
1004 case MISCREG_FPSID:
1005 case MISCREG_TLBTR:
1006 case MISCREG_MVFR0:
1007 case MISCREG_MVFR1:
1008
1009 case MISCREG_ID_AA64AFR0_EL1:
1010 case MISCREG_ID_AA64AFR1_EL1:
1011 case MISCREG_ID_AA64DFR0_EL1:
1012 case MISCREG_ID_AA64DFR1_EL1:
1013 case MISCREG_ID_AA64ISAR0_EL1:
1014 case MISCREG_ID_AA64ISAR1_EL1:
1015 case MISCREG_ID_AA64MMFR0_EL1:
1016 case MISCREG_ID_AA64MMFR1_EL1:
1017 case MISCREG_ID_AA64PFR0_EL1:
1018 case MISCREG_ID_AA64PFR1_EL1:
1019 // ID registers are constants.
1020 return;
1021
1022 // TLBI all entries, EL0&1 inner sharable (ignored)
1023 case MISCREG_TLBIALLIS:
1024 case MISCREG_TLBIALL: // TLBI all entries, EL0&1,
1025 assert32(tc);
1026 target_el = 1; // el 0 and 1 are handled together
1027 scr = readMiscReg(MISCREG_SCR, tc);
1028 secure_lookup = haveSecurity && !scr.ns;
1029 tlbiALL(tc, secure_lookup, target_el);
1030 return;
1031 // TLBI all entries, EL0&1, instruction side
1032 case MISCREG_ITLBIALL:
1033 assert32(tc);
1034 target_el = 1; // el 0 and 1 are handled together
1035 scr = readMiscReg(MISCREG_SCR, tc);
1036 secure_lookup = haveSecurity && !scr.ns;
1037 getITBPtr(tc)->flushAllSecurity(secure_lookup, target_el);
1038 return;
1039 // TLBI all entries, EL0&1, data side
1040 case MISCREG_DTLBIALL:
1041 assert32(tc);
1042 target_el = 1; // el 0 and 1 are handled together
1043 scr = readMiscReg(MISCREG_SCR, tc);
1044 secure_lookup = haveSecurity && !scr.ns;
1045 getDTBPtr(tc)->flushAllSecurity(secure_lookup, target_el);
1046 return;
1047 // TLBI based on VA, EL0&1 inner sharable (ignored)
1048 case MISCREG_TLBIMVAL:
1049 case MISCREG_TLBIMVALIS:
1050 // mcr tlbimval(is) is invalidating all matching entries
1051 // regardless of the level of lookup, since in gem5 we cache
1052 // in the tlb the last level of lookup only.
1053 case MISCREG_TLBIMVA:
1054 case MISCREG_TLBIMVAIS:
1055 assert32(tc);
1056 target_el = 1; // el 0 and 1 are handled together
1057 scr = readMiscReg(MISCREG_SCR, tc);
1058 secure_lookup = haveSecurity && !scr.ns;
1059 tlbiVA(tc, mbits(newVal, 31, 12), bits(newVal, 7,0),
1060 secure_lookup, target_el);
1061 return;
1062 // TLBI by ASID, EL0&1, inner sharable
1063 case MISCREG_TLBIASIDIS:
1064 case MISCREG_TLBIASID:
1065 assert32(tc);
1066 target_el = 1; // el 0 and 1 are handled together
1067 scr = readMiscReg(MISCREG_SCR, tc);
1068 secure_lookup = haveSecurity && !scr.ns;
1069 asid = bits(newVal, 7,0);
1070 tlbiASID(tc, asid, secure_lookup, target_el);
1071 return;
1072 // TLBI by address, EL0&1, inner sharable (ignored)
1073 case MISCREG_TLBIMVAAL:
1074 case MISCREG_TLBIMVAALIS:
1075 // mcr tlbimvaal(is) is invalidating all matching entries
1076 // regardless of the level of lookup, since in gem5 we cache
1077 // in the tlb the last level of lookup only.
1078 case MISCREG_TLBIMVAA:
1079 case MISCREG_TLBIMVAAIS:
1080 assert32(tc);
1081 target_el = 1; // el 0 and 1 are handled together
1082 scr = readMiscReg(MISCREG_SCR, tc);
1083 secure_lookup = haveSecurity && !scr.ns;
1084 hyp = 0;
1085 tlbiMVA(tc, mbits(newVal, 31,12), secure_lookup, hyp, target_el);
1086 return;
1087 // TLBI by address, EL2, hypervisor mode
1088 case MISCREG_TLBIMVALH:
1089 case MISCREG_TLBIMVALHIS:
1090 // mcr tlbimvalh(is) is invalidating all matching entries
1091 // regardless of the level of lookup, since in gem5 we cache
1092 // in the tlb the last level of lookup only.
1093 case MISCREG_TLBIMVAH:
1094 case MISCREG_TLBIMVAHIS:
1095 assert32(tc);
1096 target_el = 1; // aarch32, use hyp bit
1097 scr = readMiscReg(MISCREG_SCR, tc);
1098 secure_lookup = haveSecurity && !scr.ns;
1099 hyp = 1;
1100 tlbiMVA(tc, mbits(newVal, 31,12), secure_lookup, hyp, target_el);
1101 return;
1102 case MISCREG_TLBIIPAS2L:
1103 case MISCREG_TLBIIPAS2LIS:
1104 // mcr tlbiipas2l(is) is invalidating all matching entries
1105 // regardless of the level of lookup, since in gem5 we cache
1106 // in the tlb the last level of lookup only.
1107 case MISCREG_TLBIIPAS2:
1108 case MISCREG_TLBIIPAS2IS:
1109 assert32(tc);
1110 target_el = 1; // EL 0 and 1 are handled together
1111 scr = readMiscReg(MISCREG_SCR, tc);
1112 secure_lookup = haveSecurity && !scr.ns;
1113 tlbiIPA(tc, newVal, secure_lookup, target_el);
1114 return;
1115 // TLBI by address and asid, EL0&1, instruction side only
1116 case MISCREG_ITLBIMVA:
1117 assert32(tc);
1118 target_el = 1; // el 0 and 1 are handled together
1119 scr = readMiscReg(MISCREG_SCR, tc);
1120 secure_lookup = haveSecurity && !scr.ns;
1121 getITBPtr(tc)->flushMvaAsid(mbits(newVal, 31, 12),
1122 bits(newVal, 7,0), secure_lookup, target_el);
1123 return;
1124 // TLBI by address and asid, EL0&1, data side only
1125 case MISCREG_DTLBIMVA:
1126 assert32(tc);
1127 target_el = 1; // el 0 and 1 are handled together
1128 scr = readMiscReg(MISCREG_SCR, tc);
1129 secure_lookup = haveSecurity && !scr.ns;
1130 getDTBPtr(tc)->flushMvaAsid(mbits(newVal, 31, 12),
1131 bits(newVal, 7,0), secure_lookup, target_el);
1132 return;
          // TLBI by ASID, EL0&1, instruction side only
1134 case MISCREG_ITLBIASID:
1135 assert32(tc);
1136 target_el = 1; // el 0 and 1 are handled together
1137 scr = readMiscReg(MISCREG_SCR, tc);
1138 secure_lookup = haveSecurity && !scr.ns;
1139 getITBPtr(tc)->flushAsid(bits(newVal, 7,0), secure_lookup,
1140 target_el);
1141 return;
          // TLBI by ASID, EL0&1, data side only
1143 case MISCREG_DTLBIASID:
1144 assert32(tc);
1145 target_el = 1; // el 0 and 1 are handled together
1146 scr = readMiscReg(MISCREG_SCR, tc);
1147 secure_lookup = haveSecurity && !scr.ns;
1148 getDTBPtr(tc)->flushAsid(bits(newVal, 7,0), secure_lookup,
1149 target_el);
1150 return;
1151 // Invalidate entire Non-secure Hyp/Non-Hyp Unified TLB
1152 case MISCREG_TLBIALLNSNH:
1153 case MISCREG_TLBIALLNSNHIS:
1154 assert32(tc);
1155 target_el = 1; // el 0 and 1 are handled together
1156 hyp = 0;
1157 tlbiALLN(tc, hyp, target_el);
1158 return;
1159 // TLBI all entries, EL2, hyp,
1160 case MISCREG_TLBIALLH:
1161 case MISCREG_TLBIALLHIS:
1162 assert32(tc);
1163 target_el = 1; // aarch32, use hyp bit
1164 hyp = 1;
1165 tlbiALLN(tc, hyp, target_el);
1166 return;
1167 // AArch64 TLBI: invalidate all entries EL3
1168 case MISCREG_TLBI_ALLE3IS:
1169 case MISCREG_TLBI_ALLE3:
1170 assert64(tc);
1171 target_el = 3;
1172 secure_lookup = true;
1173 tlbiALL(tc, secure_lookup, target_el);
1174 return;
1175 // @todo: uncomment this to enable Virtualization
1176 // case MISCREG_TLBI_ALLE2IS:
1177 // case MISCREG_TLBI_ALLE2:
1178 // TLBI all entries, EL0&1
1179 case MISCREG_TLBI_ALLE1IS:
1180 case MISCREG_TLBI_ALLE1:
1181 // AArch64 TLBI: invalidate all entries, stage 1, current VMID
1182 case MISCREG_TLBI_VMALLE1IS:
1183 case MISCREG_TLBI_VMALLE1:
1184 // AArch64 TLBI: invalidate all entries, stages 1 & 2, current VMID
1185 case MISCREG_TLBI_VMALLS12E1IS:
1186 case MISCREG_TLBI_VMALLS12E1:
1187 // @todo: handle VMID and stage 2 to enable Virtualization
1188 assert64(tc);
1189 target_el = 1; // el 0 and 1 are handled together
1190 scr = readMiscReg(MISCREG_SCR, tc);
1191 secure_lookup = haveSecurity && !scr.ns;
1192 tlbiALL(tc, secure_lookup, target_el);
1193 return;
1194 // AArch64 TLBI: invalidate by VA and ASID, stage 1, current VMID
1195 // VAEx(IS) and VALEx(IS) are the same because TLBs only store entries
1196 // from the last level of translation table walks
1197 // @todo: handle VMID to enable Virtualization
1198 // TLBI all entries, EL0&1
1199 case MISCREG_TLBI_VAE3IS_Xt:
1200 case MISCREG_TLBI_VAE3_Xt:
1201 // TLBI by VA, EL3 regime stage 1, last level walk
1202 case MISCREG_TLBI_VALE3IS_Xt:
1203 case MISCREG_TLBI_VALE3_Xt:
1204 assert64(tc);
1205 target_el = 3;
1206 asid = 0xbeef; // does not matter, tlbi is global
1207 secure_lookup = true;
1208 tlbiVA(tc, ((Addr) bits(newVal, 43, 0)) << 12,
1209 asid, secure_lookup, target_el);
1210 return;
1211 // TLBI by VA, EL2
1212 case MISCREG_TLBI_VAE2IS_Xt:
1213 case MISCREG_TLBI_VAE2_Xt:
1214 // TLBI by VA, EL2, stage1 last level walk
1215 case MISCREG_TLBI_VALE2IS_Xt:
1216 case MISCREG_TLBI_VALE2_Xt:
1217 assert64(tc);
1218 target_el = 2;
1219 asid = 0xbeef; // does not matter, tlbi is global
1220 scr = readMiscReg(MISCREG_SCR, tc);
1221 secure_lookup = haveSecurity && !scr.ns;
1222 tlbiVA(tc, ((Addr) bits(newVal, 43, 0)) << 12,
1223 asid, secure_lookup, target_el);
1224 return;
1225 // TLBI by VA EL1 & 0, stage1, ASID, current VMID
1226 case MISCREG_TLBI_VAE1IS_Xt:
1227 case MISCREG_TLBI_VAE1_Xt:
1228 case MISCREG_TLBI_VALE1IS_Xt:
1229 case MISCREG_TLBI_VALE1_Xt:
1230 assert64(tc);
1231 asid = bits(newVal, 63, 48);
1232 target_el = 1; // el 0 and 1 are handled together
1233 scr = readMiscReg(MISCREG_SCR, tc);
1234 secure_lookup = haveSecurity && !scr.ns;
1235 tlbiVA(tc, ((Addr) bits(newVal, 43, 0)) << 12,
1236 asid, secure_lookup, target_el);
1237 return;
1238 // AArch64 TLBI: invalidate by ASID, stage 1, current VMID
1239 // @todo: handle VMID to enable Virtualization
1240 case MISCREG_TLBI_ASIDE1IS_Xt:
1241 case MISCREG_TLBI_ASIDE1_Xt:
1242 assert64(tc);
1243 target_el = 1; // el 0 and 1 are handled together
1244 scr = readMiscReg(MISCREG_SCR, tc);
1245 secure_lookup = haveSecurity && !scr.ns;
1246 asid = bits(newVal, 63, 48);
1247 tlbiASID(tc, asid, secure_lookup, target_el);
1248 return;
1249 // AArch64 TLBI: invalidate by VA, ASID, stage 1, current VMID
1250 // VAAE1(IS) and VAALE1(IS) are the same because TLBs only store
1251 // entries from the last level of translation table walks
1252 // @todo: handle VMID to enable Virtualization
1253 case MISCREG_TLBI_VAAE1IS_Xt:
1254 case MISCREG_TLBI_VAAE1_Xt:
1255 case MISCREG_TLBI_VAALE1IS_Xt:
1256 case MISCREG_TLBI_VAALE1_Xt:
1257 assert64(tc);
1258 target_el = 1; // el 0 and 1 are handled together
1259 scr = readMiscReg(MISCREG_SCR, tc);
1260 secure_lookup = haveSecurity && !scr.ns;
            tlbiMVA(tc,
                    ((Addr)bits(newVal, 43, 0)) << 12,
                    secure_lookup, false, target_el);
1264 return;
1265 // AArch64 TLBI: invalidate by IPA, stage 2, current VMID
1266 case MISCREG_TLBI_IPAS2LE1IS_Xt:
1267 case MISCREG_TLBI_IPAS2LE1_Xt:
1268 case MISCREG_TLBI_IPAS2E1IS_Xt:
1269 case MISCREG_TLBI_IPAS2E1_Xt:
1270 assert64(tc);
1271 target_el = 1; // EL 0 and 1 are handled together
1272 scr = readMiscReg(MISCREG_SCR, tc);
1273 secure_lookup = haveSecurity && !scr.ns;
1274 tlbiIPA(tc, newVal, secure_lookup, target_el);
1275 return;
1276 case MISCREG_ACTLR:
1277 warn("Not doing anything for write of miscreg ACTLR\n");
1278 break;
1279
1280 case MISCREG_PMXEVTYPER_PMCCFILTR:
1281 case MISCREG_PMINTENSET_EL1 ... MISCREG_PMOVSSET_EL0:
1282 case MISCREG_PMEVCNTR0_EL0 ... MISCREG_PMEVTYPER5_EL0:
1283 case MISCREG_PMCR ... MISCREG_PMOVSSET:
1284 pmu->setMiscReg(misc_reg, newVal);
1285 break;
1286
1287
          case MISCREG_HSTR: // TJDBX, now redefined to be RES0
1289 {
1290 HSTR hstrMask = 0;
1291 hstrMask.tjdbx = 1;
1292 newVal &= ~((uint32_t) hstrMask);
1293 break;
1294 }
1295 case MISCREG_HCPTR:
1296 {
1297 // If a CP bit in NSACR is 0 then the corresponding bit in
1298 // HCPTR is RAO/WI. Same applies to NSASEDIS
1299 secure_lookup = haveSecurity &&
1300 inSecureState(readMiscRegNoEffect(MISCREG_SCR),
1301 readMiscRegNoEffect(MISCREG_CPSR));
1302 if (!secure_lookup) {
1303 MiscReg oldValue = readMiscRegNoEffect(MISCREG_HCPTR);
1304 MiscReg mask = (readMiscRegNoEffect(MISCREG_NSACR) ^ 0x7FFF) & 0xBFFF;
1305 newVal = (newVal & ~mask) | (oldValue & mask);
1306 }
1307 break;
1308 }
1309 case MISCREG_HDFAR: // alias for secure DFAR
1310 misc_reg = MISCREG_DFAR_S;
1311 break;
1312 case MISCREG_HIFAR: // alias for secure IFAR
1313 misc_reg = MISCREG_IFAR_S;
1314 break;
1315 case MISCREG_ATS1CPR:
1316 case MISCREG_ATS1CPW:
1317 case MISCREG_ATS1CUR:
1318 case MISCREG_ATS1CUW:
1319 case MISCREG_ATS12NSOPR:
1320 case MISCREG_ATS12NSOPW:
1321 case MISCREG_ATS12NSOUR:
1322 case MISCREG_ATS12NSOUW:
1323 case MISCREG_ATS1HR:
1324 case MISCREG_ATS1HW:
1325 {
1326 Request::Flags flags = 0;
1327 BaseTLB::Mode mode = BaseTLB::Read;
1328 TLB::ArmTranslationType tranType = TLB::NormalTran;
1329 Fault fault;
1330 switch(misc_reg) {
1331 case MISCREG_ATS1CPR:
1332 flags = TLB::MustBeOne;
1333 tranType = TLB::S1CTran;
1334 mode = BaseTLB::Read;
1335 break;
1336 case MISCREG_ATS1CPW:
1337 flags = TLB::MustBeOne;
1338 tranType = TLB::S1CTran;
1339 mode = BaseTLB::Write;
1340 break;
1341 case MISCREG_ATS1CUR:
1342 flags = TLB::MustBeOne | TLB::UserMode;
1343 tranType = TLB::S1CTran;
1344 mode = BaseTLB::Read;
1345 break;
1346 case MISCREG_ATS1CUW:
1347 flags = TLB::MustBeOne | TLB::UserMode;
1348 tranType = TLB::S1CTran;
1349 mode = BaseTLB::Write;
1350 break;
1351 case MISCREG_ATS12NSOPR:
1352 if (!haveSecurity)
1353 panic("Security Extensions required for ATS12NSOPR");
1354 flags = TLB::MustBeOne;
1355 tranType = TLB::S1S2NsTran;
1356 mode = BaseTLB::Read;
1357 break;
1358 case MISCREG_ATS12NSOPW:
1359 if (!haveSecurity)
1360 panic("Security Extensions required for ATS12NSOPW");
1361 flags = TLB::MustBeOne;
1362 tranType = TLB::S1S2NsTran;
1363 mode = BaseTLB::Write;
1364 break;
1365 case MISCREG_ATS12NSOUR:
1366 if (!haveSecurity)
1367 panic("Security Extensions required for ATS12NSOUR");
1368 flags = TLB::MustBeOne | TLB::UserMode;
1369 tranType = TLB::S1S2NsTran;
1370 mode = BaseTLB::Read;
1371 break;
1372 case MISCREG_ATS12NSOUW:
1373 if (!haveSecurity)
1374 panic("Security Extensions required for ATS12NSOUW");
1375 flags = TLB::MustBeOne | TLB::UserMode;
1376 tranType = TLB::S1S2NsTran;
1377 mode = BaseTLB::Write;
1378 break;
1379 case MISCREG_ATS1HR: // only really useful from secure mode.
1380 flags = TLB::MustBeOne;
1381 tranType = TLB::HypMode;
1382 mode = BaseTLB::Read;
1383 break;
1384 case MISCREG_ATS1HW:
1385 flags = TLB::MustBeOne;
1386 tranType = TLB::HypMode;
1387 mode = BaseTLB::Write;
1388 break;
1389 }
            // If we're in timing mode then doing the translation in
            // functional mode distorts the performance results obtained
            // from simulations slightly. The translation should be done
            // in the same mode the core is running in. NOTE: This can't
            // be an atomic translation because that causes problems with
            // unexpected atomic snoop requests.
1396 warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
1397 Request req(0, val, 0, flags, Request::funcMasterId,
1398 tc->pcState().pc(), tc->contextId());
1399 fault = getDTBPtr(tc)->translateFunctional(
1400 &req, tc, mode, tranType);
1401 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1402 HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
1403
1404 MiscReg newVal;
1405 if (fault == NoFault) {
1406 Addr paddr = req.getPaddr();
1407 if (haveLPAE && (ttbcr.eae || tranType & TLB::HypMode ||
1408 ((tranType & TLB::S1S2NsTran) && hcr.vm) )) {
1409 newVal = (paddr & mask(39, 12)) |
1410 (getDTBPtr(tc)->getAttr());
1411 } else {
1412 newVal = (paddr & 0xfffff000) |
1413 (getDTBPtr(tc)->getAttr());
1414 }
1415 DPRINTF(MiscRegs,
1416 "MISCREG: Translated addr 0x%08x: PAR: 0x%08x\n",
1417 val, newVal);
1418 } else {
1419 ArmFault *armFault = static_cast<ArmFault *>(fault.get());
1420 armFault->update(tc);
1421 // Set fault bit and FSR
1422 FSR fsr = armFault->getFsr(tc);
1423
1424 newVal = ((fsr >> 9) & 1) << 11;
1425 if (newVal) {
                        // LPAE - rearrange fault status
1427 newVal |= ((fsr >> 0) & 0x3f) << 1;
1428 } else {
                        // VMSA - rearrange fault status
1430 newVal |= ((fsr >> 0) & 0xf) << 1;
1431 newVal |= ((fsr >> 10) & 0x1) << 5;
1432 newVal |= ((fsr >> 12) & 0x1) << 6;
1433 }
1434 newVal |= 0x1; // F bit
1435 newVal |= ((armFault->iss() >> 7) & 0x1) << 8;
1436 newVal |= armFault->isStage2() ? 0x200 : 0;
1437 DPRINTF(MiscRegs,
1438 "MISCREG: Translated addr 0x%08x fault fsr %#x: PAR: 0x%08x\n",
1439 val, fsr, newVal);
1440 }
1441 setMiscRegNoEffect(MISCREG_PAR, newVal);
1442 return;
1443 }
1444 case MISCREG_TTBCR:
1445 {
1446 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1447 const uint32_t ones = (uint32_t)(-1);
1448 TTBCR ttbcrMask = 0;
1449 TTBCR ttbcrNew = newVal;
1450
1451 // ARM DDI 0406C.b, ARMv7-32
1452 ttbcrMask.n = ones; // T0SZ
1453 if (haveSecurity) {
1454 ttbcrMask.pd0 = ones;
1455 ttbcrMask.pd1 = ones;
1456 }
1457 ttbcrMask.epd0 = ones;
1458 ttbcrMask.irgn0 = ones;
1459 ttbcrMask.orgn0 = ones;
1460 ttbcrMask.sh0 = ones;
1461 ttbcrMask.ps = ones; // T1SZ
1462 ttbcrMask.a1 = ones;
1463 ttbcrMask.epd1 = ones;
1464 ttbcrMask.irgn1 = ones;
1465 ttbcrMask.orgn1 = ones;
1466 ttbcrMask.sh1 = ones;
1467 if (haveLPAE)
1468 ttbcrMask.eae = ones;
1469
1470 if (haveLPAE && ttbcrNew.eae) {
1471 newVal = newVal & ttbcrMask;
1472 } else {
1473 newVal = (newVal & ttbcrMask) | (ttbcr & (~ttbcrMask));
1474 }
1475 }
1476 M5_FALLTHROUGH;
1477 case MISCREG_TTBR0:
1478 case MISCREG_TTBR1:
1479 {
1480 TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
1481 if (haveLPAE) {
1482 if (ttbcr.eae) {
1483 // ARMv7 bit 63-56, 47-40 reserved, UNK/SBZP
1484 // ARMv8 AArch32 bit 63-56 only
1485 uint64_t ttbrMask = mask(63,56) | mask(47,40);
1486 newVal = (newVal & (~ttbrMask));
1487 }
1488 }
1489 }
1490 M5_FALLTHROUGH;
1491 case MISCREG_SCTLR_EL1:
1492 {
1493 getITBPtr(tc)->invalidateMiscReg();
1494 getDTBPtr(tc)->invalidateMiscReg();
1495 setMiscRegNoEffect(misc_reg, newVal);
1496 }
1497 M5_FALLTHROUGH;
1498 case MISCREG_CONTEXTIDR:
1499 case MISCREG_PRRR:
1500 case MISCREG_NMRR:
1501 case MISCREG_MAIR0:
1502 case MISCREG_MAIR1:
1503 case MISCREG_DACR:
1504 case MISCREG_VTTBR:
1505 case MISCREG_SCR_EL3:
1506 case MISCREG_HCR_EL2:
1507 case MISCREG_TCR_EL1:
1508 case MISCREG_TCR_EL2:
1509 case MISCREG_TCR_EL3:
1510 case MISCREG_SCTLR_EL2:
1511 case MISCREG_SCTLR_EL3:
1512 case MISCREG_HSCTLR:
1513 case MISCREG_TTBR0_EL1:
1514 case MISCREG_TTBR1_EL1:
1515 case MISCREG_TTBR0_EL2:
1516 case MISCREG_TTBR0_EL3:
1517 getITBPtr(tc)->invalidateMiscReg();
1518 getDTBPtr(tc)->invalidateMiscReg();
1519 break;
1520 case MISCREG_NZCV:
1521 {
1522 CPSR cpsr = val;
1523
1524 tc->setCCReg(CCREG_NZ, cpsr.nz);
1525 tc->setCCReg(CCREG_C, cpsr.c);
1526 tc->setCCReg(CCREG_V, cpsr.v);
1527 }
1528 break;
1529 case MISCREG_DAIF:
1530 {
1531 CPSR cpsr = miscRegs[MISCREG_CPSR];
1532 cpsr.daif = (uint8_t) ((CPSR) newVal).daif;
1533 newVal = cpsr;
1534 misc_reg = MISCREG_CPSR;
1535 }
1536 break;
1537 case MISCREG_SP_EL0:
1538 tc->setIntReg(INTREG_SP0, newVal);
1539 break;
1540 case MISCREG_SP_EL1:
1541 tc->setIntReg(INTREG_SP1, newVal);
1542 break;
1543 case MISCREG_SP_EL2:
1544 tc->setIntReg(INTREG_SP2, newVal);
1545 break;
1546 case MISCREG_SPSEL:
1547 {
1548 CPSR cpsr = miscRegs[MISCREG_CPSR];
1549 cpsr.sp = (uint8_t) ((CPSR) newVal).sp;
1550 newVal = cpsr;
1551 misc_reg = MISCREG_CPSR;
1552 }
1553 break;
1554 case MISCREG_CURRENTEL:
1555 {
1556 CPSR cpsr = miscRegs[MISCREG_CPSR];
1557 cpsr.el = (uint8_t) ((CPSR) newVal).el;
1558 newVal = cpsr;
1559 misc_reg = MISCREG_CPSR;
1560 }
1561 break;
1562 case MISCREG_AT_S1E1R_Xt:
1563 case MISCREG_AT_S1E1W_Xt:
1564 case MISCREG_AT_S1E0R_Xt:
1565 case MISCREG_AT_S1E0W_Xt:
1566 case MISCREG_AT_S1E2R_Xt:
1567 case MISCREG_AT_S1E2W_Xt:
1568 case MISCREG_AT_S12E1R_Xt:
1569 case MISCREG_AT_S12E1W_Xt:
1570 case MISCREG_AT_S12E0R_Xt:
1571 case MISCREG_AT_S12E0W_Xt:
1572 case MISCREG_AT_S1E3R_Xt:
1573 case MISCREG_AT_S1E3W_Xt:
1574 {
1575 RequestPtr req = new Request;
1576 Request::Flags flags = 0;
1577 BaseTLB::Mode mode = BaseTLB::Read;
1578 TLB::ArmTranslationType tranType = TLB::NormalTran;
1579 Fault fault;
1580 switch(misc_reg) {
1581 case MISCREG_AT_S1E1R_Xt:
1582 flags = TLB::MustBeOne;
1583 tranType = TLB::S1E1Tran;
1584 mode = BaseTLB::Read;
1585 break;
1586 case MISCREG_AT_S1E1W_Xt:
1587 flags = TLB::MustBeOne;
1588 tranType = TLB::S1E1Tran;
1589 mode = BaseTLB::Write;
1590 break;
1591 case MISCREG_AT_S1E0R_Xt:
1592 flags = TLB::MustBeOne | TLB::UserMode;
1593 tranType = TLB::S1E0Tran;
1594 mode = BaseTLB::Read;
1595 break;
1596 case MISCREG_AT_S1E0W_Xt:
1597 flags = TLB::MustBeOne | TLB::UserMode;
1598 tranType = TLB::S1E0Tran;
1599 mode = BaseTLB::Write;
1600 break;
1601 case MISCREG_AT_S1E2R_Xt:
1602 flags = TLB::MustBeOne;
1603 tranType = TLB::S1E2Tran;
1604 mode = BaseTLB::Read;
1605 break;
1606 case MISCREG_AT_S1E2W_Xt:
1607 flags = TLB::MustBeOne;
1608 tranType = TLB::S1E2Tran;
1609 mode = BaseTLB::Write;
1610 break;
1611 case MISCREG_AT_S12E0R_Xt:
1612 flags = TLB::MustBeOne | TLB::UserMode;
1613 tranType = TLB::S12E0Tran;
1614 mode = BaseTLB::Read;
1615 break;
1616 case MISCREG_AT_S12E0W_Xt:
1617 flags = TLB::MustBeOne | TLB::UserMode;
1618 tranType = TLB::S12E0Tran;
1619 mode = BaseTLB::Write;
1620 break;
1621 case MISCREG_AT_S12E1R_Xt:
1622 flags = TLB::MustBeOne;
1623 tranType = TLB::S12E1Tran;
1624 mode = BaseTLB::Read;
1625 break;
1626 case MISCREG_AT_S12E1W_Xt:
1627 flags = TLB::MustBeOne;
1628 tranType = TLB::S12E1Tran;
1629 mode = BaseTLB::Write;
1630 break;
1631 case MISCREG_AT_S1E3R_Xt:
1632 flags = TLB::MustBeOne;
1633 tranType = TLB::S1E3Tran;
1634 mode = BaseTLB::Read;
1635 break;
1636 case MISCREG_AT_S1E3W_Xt:
1637 flags = TLB::MustBeOne;
1638 tranType = TLB::S1E3Tran;
1639 mode = BaseTLB::Write;
1640 break;
1641 }
                // If we're in timing mode then doing the translation in
                // functional mode distorts the performance results obtained
                // from simulations slightly. The translation should be done
                // in the same mode the core is running in. NOTE: This can't
                // be an atomic translation because that causes problems with
                // unexpected atomic snoop requests.
1648 warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
1649 req->setVirt(0, val, 0, flags, Request::funcMasterId,
1650 tc->pcState().pc());
1651 req->setContext(tc->contextId());
1652 fault = getDTBPtr(tc)->translateFunctional(req, tc, mode,
1653 tranType);
1654
1655 MiscReg newVal;
1656 if (fault == NoFault) {
1657 Addr paddr = req->getPaddr();
1658 uint64_t attr = getDTBPtr(tc)->getAttr();
1659 uint64_t attr1 = attr >> 56;
                    if (!attr1 || attr1 == 0x44) {
1661 attr |= 0x100;
1662 attr &= ~ uint64_t(0x80);
1663 }
1664 newVal = (paddr & mask(47, 12)) | attr;
1665 DPRINTF(MiscRegs,
1666 "MISCREG: Translated addr %#x: PAR_EL1: %#xx\n",
1667 val, newVal);
1668 } else {
1669 ArmFault *armFault = static_cast<ArmFault *>(fault.get());
1670 armFault->update(tc);
1671 // Set fault bit and FSR
1672 FSR fsr = armFault->getFsr(tc);
1673
1674 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1675 if (cpsr.width) { // AArch32
1676 newVal = ((fsr >> 9) & 1) << 11;
1677 // rearrange fault status
1678 newVal |= ((fsr >> 0) & 0x3f) << 1;
1679 newVal |= 0x1; // F bit
1680 newVal |= ((armFault->iss() >> 7) & 0x1) << 8;
1681 newVal |= armFault->isStage2() ? 0x200 : 0;
1682 } else { // AArch64
1683 newVal = 1; // F bit
1684 newVal |= fsr << 1; // FST
1685 // TODO: DDI 0487A.f D7-2083, AbortFault's s1ptw bit.
1686 newVal |= armFault->isStage2() ? 1 << 8 : 0; // PTW
1687 newVal |= armFault->isStage2() ? 1 << 9 : 0; // S
1688 newVal |= 1 << 11; // RES1
1689 }
1690 DPRINTF(MiscRegs,
1691 "MISCREG: Translated addr %#x fault fsr %#x: PAR: %#x\n",
1692 val, fsr, newVal);
1693 }
1694 delete req;
1695 setMiscRegNoEffect(MISCREG_PAR_EL1, newVal);
1696 return;
1697 }
1698 case MISCREG_SPSR_EL3:
1699 case MISCREG_SPSR_EL2:
1700 case MISCREG_SPSR_EL1:
1701 // Force bits 23:21 to 0
1702 newVal = val & ~(0x7 << 21);
1703 break;
1704 case MISCREG_L2CTLR:
1705 warn("miscreg L2CTLR (%s) written with %#x. ignored...\n",
1706 miscRegName[misc_reg], uint32_t(val));
1707 break;
1708
1709 // Generic Timer registers
1710 case MISCREG_CNTFRQ ... MISCREG_CNTHP_CTL:
1711 case MISCREG_CNTPCT ... MISCREG_CNTHP_CVAL:
1712 case MISCREG_CNTKCTL_EL1 ... MISCREG_CNTV_CVAL_EL0:
1713 case MISCREG_CNTVOFF_EL2 ... MISCREG_CNTPS_CVAL_EL1:
1714 getGenericTimer(tc).setMiscReg(misc_reg, newVal);
1715 break;
1716 }
1717 }
1718 setMiscRegNoEffect(misc_reg, newVal);
1719}
1720
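// Invalidate TLB entries matching the given VA and ASID in the I- and
// D-TLBs of every thread context (and any attached checker CPUs).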
1721void
1722ISA::tlbiVA(ThreadContext *tc, Addr va, uint16_t asid,
1723 bool secure_lookup, uint8_t target_el)
1724{
1725 if (!haveLargeAsid64)
1726 asid &= mask(8);
1727 System *sys = tc->getSystemPtr();
1728 for (int x = 0; x < sys->numContexts(); x++) {
1729 ThreadContext *oc = sys->getThreadContext(x);
1730 getITBPtr(oc)->flushMvaAsid(va, asid,
1731 secure_lookup, target_el);
1732 getDTBPtr(oc)->flushMvaAsid(va, asid,
1733 secure_lookup, target_el);
1734
1735 CheckerCPU *checker = oc->getCheckerCpuPtr();
1736 if (checker) {
1737 getITBPtr(checker)->flushMvaAsid(
1738 va, asid, secure_lookup, target_el);
1739 getDTBPtr(checker)->flushMvaAsid(
1740 va, asid, secure_lookup, target_el);
1741 }
1742 }
1743}
1744
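// Invalidate all TLB entries for the given security state and target EL
// across every thread context.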
1745void
1746ISA::tlbiALL(ThreadContext *tc, bool secure_lookup, uint8_t target_el)
1747{
1748 System *sys = tc->getSystemPtr();
1749 for (int x = 0; x < sys->numContexts(); x++) {
1750 ThreadContext *oc = sys->getThreadContext(x);
1751 getITBPtr(oc)->flushAllSecurity(secure_lookup, target_el);
1752 getDTBPtr(oc)->flushAllSecurity(secure_lookup, target_el);
1753
1754 // If CheckerCPU is connected, need to notify it of a flush
1755 CheckerCPU *checker = oc->getCheckerCpuPtr();
1756 if (checker) {
1757 getITBPtr(checker)->flushAllSecurity(secure_lookup,
1758 target_el);
1759 getDTBPtr(checker)->flushAllSecurity(secure_lookup,
1760 target_el);
1761 }
1762 }
1763}
1764
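// Invalidate all non-secure TLB entries (hyp or non-hyp) across every
// thread context.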
1765void
1766ISA::tlbiALLN(ThreadContext *tc, bool hyp, uint8_t target_el)
1767{
1768 System *sys = tc->getSystemPtr();
1769 for (int x = 0; x < sys->numContexts(); x++) {
1770 ThreadContext *oc = sys->getThreadContext(x);
1771 getITBPtr(oc)->flushAllNs(hyp, target_el);
1772 getDTBPtr(oc)->flushAllNs(hyp, target_el);
1773
1774 CheckerCPU *checker = oc->getCheckerCpuPtr();
1775 if (checker) {
1776 getITBPtr(checker)->flushAllNs(hyp, target_el);
1777 getDTBPtr(checker)->flushAllNs(hyp, target_el);
1778 }
1779 }
1780}
1781
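// Invalidate TLB entries matching the given VA (regardless of ASID) across
// every thread context.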
1782void
1783ISA::tlbiMVA(ThreadContext *tc, Addr va, bool secure_lookup, bool hyp,
1784 uint8_t target_el)
1785{
1786 System *sys = tc->getSystemPtr();
1787 for (int x = 0; x < sys->numContexts(); x++) {
1788 ThreadContext *oc = sys->getThreadContext(x);
1789 getITBPtr(oc)->flushMva(va, secure_lookup, hyp, target_el);
1790 getDTBPtr(oc)->flushMva(va, secure_lookup, hyp, target_el);
1791
1792 CheckerCPU *checker = oc->getCheckerCpuPtr();
1793 if (checker) {
1794 getITBPtr(checker)->flushMva(va, secure_lookup, hyp, target_el);
1795 getDTBPtr(checker)->flushMva(va, secure_lookup, hyp, target_el);
1796 }
1797 }
1798}
1799
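// Invalidate stage 2 TLB entries matching the given intermediate physical
// address across every thread context.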
1800void
1801ISA::tlbiIPA(ThreadContext *tc, MiscReg newVal, bool secure_lookup,
1802 uint8_t target_el)
1803{
1804 System *sys = tc->getSystemPtr();
1805 for (auto x = 0; x < sys->numContexts(); x++) {
1806 tc = sys->getThreadContext(x);
1807 Addr ipa = ((Addr) bits(newVal, 35, 0)) << 12;
1808 getITBPtr(tc)->flushIpaVmid(ipa,
1809 secure_lookup, false, target_el);
1810 getDTBPtr(tc)->flushIpaVmid(ipa,
1811 secure_lookup, false, target_el);
1812
1813 CheckerCPU *checker = tc->getCheckerCpuPtr();
1814 if (checker) {
1815 getITBPtr(checker)->flushIpaVmid(ipa,
1816 secure_lookup, false, target_el);
1817 getDTBPtr(checker)->flushIpaVmid(ipa,
1818 secure_lookup, false, target_el);
1819 }
1820 }
1821}
1822
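// Invalidate TLB entries matching the given ASID across every thread
// context, truncating the ASID to 8 bits when large ASIDs are not
// implemented.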
1823void
1824ISA::tlbiASID(ThreadContext *tc, uint16_t asid, bool secure_lookup,
1825 uint8_t target_el)
1826{
1827 if (!haveLargeAsid64)
1828 asid &= mask(8);
1829
1830 System *sys = tc->getSystemPtr();
1831 for (auto x = 0; x < sys->numContexts(); x++) {
1832 tc = sys->getThreadContext(x);
1833 getITBPtr(tc)->flushAsid(asid, secure_lookup, target_el);
1834 getDTBPtr(tc)->flushAsid(asid, secure_lookup, target_el);
1835 CheckerCPU *checker = tc->getCheckerCpuPtr();
1836 if (checker) {
1837 getITBPtr(checker)->flushAsid(asid, secure_lookup, target_el);
1838 getDTBPtr(checker)->flushAsid(asid, secure_lookup, target_el);
1839 }
1840 }
1841}
1842
1843BaseISADevice &
1844ISA::getGenericTimer(ThreadContext *tc)
1845{
1846 // We only need to create an ISA interface the first time we try
1847 // to access the timer.
1848 if (timer)
1849 return *timer.get();
1850
1851 assert(system);
1852 GenericTimer *generic_timer(system->getGenericTimer());
1853 if (!generic_timer) {
1854 panic("Trying to get a generic timer from a system that hasn't "
1855 "been configured to use a generic timer.\n");
1856 }
1857
1858 timer.reset(new GenericTimerISA(*generic_timer, tc->contextId()));
1859 return *timer.get();
1860}
1861
1862}
1863
1864ArmISA::ISA *
1865ArmISAParams::create()
1866{
1867 return new ArmISA::ISA(this);
1868}