2c2
< * Copyright (c) 2010 ARM Limited
---
> * Copyright (c) 2010, 2012-2013 ARM Limited
37a38
> * Giacomo Gabrielli
40a42,43
> #include "arch/arm/stage2_mmu.hh"
> #include "arch/arm/system.hh"
54,56c57,59
< : MemObject(p), port(this, params()->sys), drainManager(NULL),
< tlb(NULL), currState(NULL), pending(false),
< masterId(p->sys->getMasterId(name())),
---
> : MemObject(p), port(this, p->sys), drainManager(NULL),
> stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
> currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
58c61,64
< doL1DescEvent(this), doL2DescEvent(this), doProcessEvent(this)
---
> doL1DescEvent(this), doL2DescEvent(this),
> doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
> doL3LongDescEvent(this),
> doProcessEvent(this)
60a67,83
>
> // Cache system-level properties
> if (FullSystem) {
> armSys = dynamic_cast<ArmSystem *>(p->sys);
> assert(armSys);
> haveSecurity = armSys->haveSecurity();
> _haveLPAE = armSys->haveLPAE();
> _haveVirtualization = armSys->haveVirtualization();
> physAddrRange = armSys->physAddrRange();
> _haveLargeAsid64 = armSys->haveLargeAsid64();
> } else {
> armSys = NULL;
> haveSecurity = _haveLPAE = _haveVirtualization = false;
> _haveLargeAsid64 = false;
> physAddrRange = 32;
> }
>
67a91,94
> TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
> {
> }
>
71c98
< if (drainManager && stateQueueL1.empty() && stateQueueL2.empty() &&
---
> if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
85,88c112
< if (stateQueueL1.empty() && stateQueueL2.empty() &&
< pendingQueue.empty()) {
< setDrainState(Drainable::Drained);
< DPRINTF(Drain, "TableWalker free, no need to drain\n");
---
> bool state_queues_not_empty = false;
90,92c114,121
< // table walker is drained, but its ports may still need to be drained
< return count;
< } else {
---
> for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
> if (!stateQueues[i].empty()) {
> state_queues_not_empty = true;
> break;
> }
> }
>
> if (state_queues_not_empty || pendingQueue.size()) {
98a128,130
> } else {
> setDrainState(Drainable::Drained);
> DPRINTF(Drain, "TableWalker free, no need to drain\n");
99a132,133
> // table walker is drained, but its ports may still need to be drained
> return count;
123,124c157,160
< TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint8_t _cid, TLB::Mode _mode,
< TLB::Translation *_trans, bool _timing, bool _functional)
---
> TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
> uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
> TLB::Translation *_trans, bool _timing, bool _functional,
> bool secure, TLB::ArmTranslationType tranType)
126a163
>
142c179
< if (currState->vaddr == _req->getVaddr()) {
---
> if (currState->vaddr_tainted == _req->getVaddr()) {
145d181
< panic("currState should always be empty in timing mode!\n");
148a185,186
> currState->aarch64 = opModeIs64(currOpMode(_tc));
> currState->el = currEL(_tc);
152c190,192
< currState->contextId = _cid;
---
> currState->asid = _asid;
> currState->vmid = _vmid;
> currState->isHyp = _isHyp;
155a196,198
> currState->tranType = tranType;
> currState->isSecure = secure;
> currState->physAddrRange = physAddrRange;
159,160c202,239
< currState->vaddr = currState->req->getVaddr();
< currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR);
---
> currState->vaddr_tainted = currState->req->getVaddr();
> if (currState->aarch64)
> currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
> currState->tc, currState->el);
> else
> currState->vaddr = currState->vaddr_tainted;
>
> if (currState->aarch64) {
> switch (currState->el) {
> case EL0:
> case EL1:
> currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
> currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
> break;
> // @todo: uncomment this to enable Virtualization
> // case EL2:
> // assert(haveVirtualization);
> // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
> // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
> // break;
> case EL3:
> assert(haveSecurity);
> currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
> currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
> break;
> default:
> panic("Invalid exception level");
> break;
> }
> } else {
> currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_SCTLR, currState->tc, !currState->isSecure));
> currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_TTBCR, currState->tc, !currState->isSecure));
> currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
> currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
> currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
> }
162d240
< currState->N = currState->tc->readMiscReg(MISCREG_TTBCR);
166a245,251
> // We only do a second stage of translation if we're not secure, not in
> // hyp mode, the second stage MMU is enabled, and this table walker
> // instance is the first stage.
> currState->doingStage2 = false;
> // @todo: for now disable this in AArch64 (HCR is not set)
> currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
> !isStage2 && !currState->isSecure && !currState->isHyp;
168,169c253,255
< if (!currState->timing)
< return processWalk();
---
> bool long_desc_format = currState->aarch64 ||
> (_haveLPAE && currState->ttbcr.eae) ||
> _isHyp || isStage2;
170a257,274
> if (long_desc_format) {
> // Helper variables used for hierarchical permissions
> currState->secureLookup = currState->isSecure;
> currState->rwTable = true;
> currState->userTable = true;
> currState->xnTable = false;
> currState->pxnTable = false;
> }
>
> if (!currState->timing) {
> if (currState->aarch64)
> return processWalkAArch64();
> else if (long_desc_format)
> return processWalkLPAE();
> else
> return processWalk();
> }
>
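The two predicates set up above, stage2Req and long_desc_format, decide the
rest of the walk. As a cross-check, here is a minimal standalone restatement;
it is illustrative only, not part of the patch, and the helper names are
invented.

    constexpr bool needsStage2Walk(bool aarch64, bool hcr_vm, bool is_stage2,
                                   bool is_secure, bool is_hyp)
    {
        // Stage 2 descriptor fetches only happen for a non-secure, non-hyp,
        // stage 1 AArch32 walk with HCR.VM set (AArch64 is excluded for now).
        return !aarch64 && hcr_vm && !is_stage2 && !is_secure && !is_hyp;
    }

    constexpr bool usesLongDescFormat(bool aarch64, bool have_lpae, bool eae,
                                      bool is_hyp, bool is_stage2)
    {
        // AArch64, LPAE with TTBCR.EAE set, hyp mode and stage 2 walks all
        // use the 64-bit long-descriptor format.
        return aarch64 || (have_lpae && eae) || is_hyp || is_stage2;
    }

    // E.g. a non-secure AArch32 stage 1 walk with HCR.VM set fetches its
    // descriptors through the stage 2 MMU, and an LPAE walk with TTBCR.EAE
    // set uses the long-descriptor format.
    static_assert(needsStage2Walk(false, true, false, false, false), "stage 2");
    static_assert(usesLongDescFormat(false, true, true, false, false), "format");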
176c280,285
< return processWalk();
---
> if (currState->aarch64)
> return processWalkAArch64();
> else if (long_desc_format)
> return processWalkLPAE();
> else
> return processWalk();
188a298,303
> ExceptionLevel target_el = EL0;
> if (currState->aarch64)
> target_el = currEL(currState->tc);
> else
> target_el = EL1;
>
190c305,308
< TlbEntry* te = tlb->lookup(currState->vaddr, currState->contextId, true);
---
> // @TODO Should this always be the TLB or should we look in the stage2 TLB?
> TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
> currState->vmid, currState->isHyp, currState->isSecure, true, false,
> target_el);
201c319,324
< processWalk();
---
> if (currState->aarch64)
> processWalkAArch64();
> else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
> processWalkLPAE();
> else
> processWalk();
215c338,339
< DPRINTF(TLB, "Squashing table walk for address %#x\n", currState->vaddr);
---
> DPRINTF(TLB, "Squashing table walk for address %#x\n",
> currState->vaddr_tainted);
223,224c347,349
< currState->fault = tlb->translateTiming(currState->req, currState->tc,
< currState->transState, currState->mode);
---
> tlb->translateTiming(currState->req, currState->tc,
> currState->transState, currState->mode);
>
233c358,360
< te = tlb->lookup(currState->vaddr, currState->contextId, true);
---
> te = tlb->lookup(currState->vaddr, currState->asid,
> currState->vmid, currState->isHyp, currState->isSecure, true,
> false, target_el);
252c379
< assert(currState->sctlr.m);
---
> assert(currState->sctlr.m || isStage2);
254,256c381,383
< DPRINTF(TLB, "Begining table walk for address %#x, TTBCR: %#x, bits:%#x\n",
< currState->vaddr, currState->N, mbits(currState->vaddr, 31,
< 32-currState->N));
---
> DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
> currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
> 32 - currState->ttbcr.n));
258c385,386
< if (currState->N == 0 || !mbits(currState->vaddr, 31, 32-currState->N)) {
---
> if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
> 32 - currState->ttbcr.n)) {
260c388,402
< ttbr = currState->tc->readMiscReg(MISCREG_TTBR0);
---
> // Check if table walk is allowed when Security Extensions are enabled
> if (haveSecurity && currState->ttbcr.pd0) {
> if (currState->isFetch)
> return new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::VmsaTran);
> else
> return new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess, currState->isWrite,
> ArmFault::TranslationLL + L1, isStage2,
> ArmFault::VmsaTran);
> }
> ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_TTBR0, currState->tc, !currState->isSecure));
263,264c405,420
< ttbr = currState->tc->readMiscReg(MISCREG_TTBR1);
< currState->N = 0;
---
> // Check if table walk is allowed when Security Extensions are enabled
> if (haveSecurity && currState->ttbcr.pd1) {
> if (currState->isFetch)
> return new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::VmsaTran);
> else
> return new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess, currState->isWrite,
> ArmFault::TranslationLL + L1, isStage2,
> ArmFault::VmsaTran);
> }
> ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_TTBR1, currState->tc, !currState->isSecure));
> currState->ttbcr.n = 0;
267,269c423,426
< Addr l1desc_addr = mbits(ttbr, 31, 14-currState->N) |
< (bits(currState->vaddr,31-currState->N,20) << 2);
< DPRINTF(TLB, " - Descriptor at address %#x\n", l1desc_addr);
---
> Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
> (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
> DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
> currState->isSecure ? "s" : "ns");
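The L1 descriptor address in the hunk above combines TTBR[31:14-N] with
VA[31-N:20]. A self-contained sketch of the same arithmetic with an invented
worked case; illustrative only, not part of the patch, and mbits()/bits() are
re-derived locally so the snippet stands alone:

    #include <cstdint>

    // Short-descriptor walk: L1 descriptor address from TTBR, VA and TTBCR.N.
    constexpr uint32_t sd_l1desc_addr(uint32_t ttbr, uint32_t vaddr, unsigned n)
    {
        return (ttbr & ~((1u << (14 - n)) - 1))       // mbits(ttbr, 31, 14 - n)
             | (((vaddr << n) >> (n + 20)) << 2);     // bits(vaddr, 31 - n, 20) << 2
    }

    // With TTBCR.N == 0, TTBR == 0x80004000 and VA == 0x00401234 the walker
    // fetches the L1 descriptor from 0x80004010.
    static_assert(sd_l1desc_addr(0x80004000u, 0x00401234u, 0) == 0x80004010u,
                  "worked example");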
271d427
<
274,275c430,432
< f = tlb->walkTrickBoxCheck(l1desc_addr, currState->vaddr, sizeof(uint32_t),
< currState->isFetch, currState->isWrite, 0, true);
---
> f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
> currState->vaddr, sizeof(uint32_t), currState->isFetch,
> currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
277c434
< DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr);
---
> DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
293a451,826
> bool delayed;
> delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
> sizeof(uint32_t), flag, L1, &doL1DescEvent,
> &TableWalker::doL1Descriptor);
> if (!delayed) {
> f = currState->fault;
> }
>
> return f;
> }
>
> Fault
> TableWalker::processWalkLPAE()
> {
> Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
> int tsz, n;
> LookupLevel start_lookup_level = L1;
>
> DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
> currState->vaddr_tainted, currState->ttbcr);
>
> Request::Flags flag = 0;
> if (currState->isSecure)
> flag.set(Request::SECURE);
>
> // Work out which base address register to use: stage 2 walks always use
> // VTTBR and hyp mode always uses HTTBR
> if (isStage2) {
> DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
> ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
> tsz = sext<4>(currState->vtcr.t0sz);
> start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
> } else if (currState->isHyp) {
> DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
> ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
> tsz = currState->htcr.t0sz;
> } else {
> assert(_haveLPAE && currState->ttbcr.eae);
>
> // Determine boundaries of TTBR0/1 regions
> if (currState->ttbcr.t0sz)
> ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
> else if (currState->ttbcr.t1sz)
> ttbr0_max = (1ULL << 32) -
> (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
> else
> ttbr0_max = (1ULL << 32) - 1;
> if (currState->ttbcr.t1sz)
> ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
> else
> ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
>
> // The following code snippet selects the appropriate translation table base
> // address (TTBR0 or TTBR1) and the appropriate starting lookup level
> // depending on the address range supported by the translation table (ARM
> // ARM issue C B3.6.4)
> if (currState->vaddr <= ttbr0_max) {
> DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
> // Check if table walk is allowed
> if (currState->ttbcr.epd0) {
> if (currState->isFetch)
> return new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::LpaeTran);
> else
> return new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::LpaeTran);
> }
> ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_TTBR0, currState->tc, !currState->isSecure));
> tsz = currState->ttbcr.t0sz;
> if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
> start_lookup_level = L2;
> } else if (currState->vaddr >= ttbr1_min) {
> DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
> // Check if table walk is allowed
> if (currState->ttbcr.epd1) {
> if (currState->isFetch)
> return new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::LpaeTran);
> else
> return new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::LpaeTran);
> }
> ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_TTBR1, currState->tc, !currState->isSecure));
> tsz = currState->ttbcr.t1sz;
> if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
> start_lookup_level = L2;
> } else {
> // Out of bounds -> translation fault
> if (currState->isFetch)
> return new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::LpaeTran);
> else
> return new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite, ArmFault::TranslationLL + L1,
> isStage2, ArmFault::LpaeTran);
> }
>
> }
>
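The TTBR0/TTBR1 split computed above (ARM ARM B3.6.4) can be checked in
isolation. The following sketch is illustrative only, not part of the patch;
the helper names are invented:

    #include <cstdint>

    constexpr uint64_t lpae_ttbr0_max(unsigned t0sz, unsigned t1sz)
    {
        return t0sz ? (1ULL << (32 - t0sz)) - 1
                    : t1sz ? (1ULL << 32) - (1ULL << (32 - t1sz)) - 1
                           : (1ULL << 32) - 1;
    }

    constexpr uint64_t lpae_ttbr1_min(unsigned t0sz, unsigned t1sz)
    {
        return t1sz ? (1ULL << 32) - (1ULL << (32 - t1sz))
                    : (1ULL << (32 - t0sz));
    }

    // T0SZ == T1SZ == 1: VAs up to 0x7fffffff walk from TTBR0 and VAs from
    // 0x80000000 walk from TTBR1. Both regions are 2 GB, so neither walk
    // gets to skip the L1 lookup.
    static_assert(lpae_ttbr0_max(1, 1) == 0x7fffffffULL, "TTBR0 upper bound");
    static_assert(lpae_ttbr1_min(1, 1) == 0x80000000ULL, "TTBR1 lower bound");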
> // Perform lookup (ARM ARM issue C B3.6.6)
> if (start_lookup_level == L1) {
> n = 5 - tsz;
> desc_addr = mbits(ttbr, 39, n) |
> (bits(currState->vaddr, n + 26, 30) << 3);
> DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
> desc_addr, currState->isSecure ? "s" : "ns");
> } else {
> // Skip first-level lookup
> n = (tsz >= 2 ? 14 - tsz : 12);
> desc_addr = mbits(ttbr, 39, n) |
> (bits(currState->vaddr, n + 17, 21) << 3);
> DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
> desc_addr, currState->isSecure ? "s" : "ns");
> }
>
> // Trickbox address check
> Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
> currState->vaddr, sizeof(uint64_t), currState->isFetch,
> currState->isWrite, TlbEntry::DomainType::NoAccess,
> start_lookup_level);
> if (f) {
> DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
> if (currState->timing) {
> pending = false;
> nextWalk(currState->tc);
> currState = NULL;
> } else {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return f;
> }
>
> if (currState->sctlr.c == 0) {
> flag = Request::UNCACHEABLE;
> }
>
> if (currState->isSecure)
> flag.set(Request::SECURE);
>
> currState->longDesc.lookupLevel = start_lookup_level;
> currState->longDesc.aarch64 = false;
> currState->longDesc.largeGrain = false;
> currState->longDesc.grainSize = 12;
>
> Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
> : (Event *) &doL2LongDescEvent;
>
> bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
> sizeof(uint64_t), flag, start_lookup_level,
> event, &TableWalker::doLongDescriptor);
> if (!delayed) {
> f = currState->fault;
> }
>
> return f;
> }
>
> unsigned
> TableWalker::adjustTableSizeAArch64(unsigned tsz)
> {
> if (tsz < 25)
> return 25;
> if (tsz > 48)
> return 48;
> return tsz;
> }
>
> bool
> TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
> {
> return (currPhysAddrRange != MaxPhysAddrRange &&
> bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
> }
>
> Fault
> TableWalker::processWalkAArch64()
> {
> assert(currState->aarch64);
>
> DPRINTF(TLB, "Beginning table walk for address %#llx, TTBCR: %#llx\n",
> currState->vaddr_tainted, currState->ttbcr);
>
> // Determine TTBR, table size, granule size and phys. address range
> Addr ttbr = 0;
> int tsz = 0, ps = 0;
> bool large_grain = false;
> bool fault = false;
> switch (currState->el) {
> case EL0:
> case EL1:
> switch (bits(currState->vaddr, 63,48)) {
> case 0:
> DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
> ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
> tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
> large_grain = currState->ttbcr.tg0;
> if (bits(currState->vaddr, 63, tsz) != 0x0 ||
> currState->ttbcr.epd0)
> fault = true;
> break;
> case 0xffff:
> DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
> ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
> tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t1sz);
> large_grain = currState->ttbcr.tg1;
> if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
> currState->ttbcr.epd1)
> fault = true;
> break;
> default:
> // top two bytes must be all 0s or all 1s, else invalid addr
> fault = true;
> }
> ps = currState->ttbcr.ips;
> break;
> case EL2:
> case EL3:
> switch(bits(currState->vaddr, 63,48)) {
> case 0:
> DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
> if (currState->el == EL2)
> ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
> else
> ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
> tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
> large_grain = currState->ttbcr.tg0;
> break;
> default:
> // invalid addr if top two bytes are not all 0s
> fault = true;
> }
> ps = currState->ttbcr.ps;
> break;
> }
>
> if (fault) {
> Fault f;
> if (currState->isFetch)
> f = new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L0, isStage2,
> ArmFault::LpaeTran);
> else
> f = new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::TranslationLL + L0,
> isStage2, ArmFault::LpaeTran);
>
> if (currState->timing) {
> pending = false;
> nextWalk(currState->tc);
> currState = NULL;
> } else {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return f;
>
> }
>
> // Determine starting lookup level
> LookupLevel start_lookup_level;
> int grain_size, stride;
> if (large_grain) { // 64 KB granule
> grain_size = 16;
> stride = grain_size - 3;
> if (tsz > grain_size + 2 * stride)
> start_lookup_level = L1;
> else if (tsz > grain_size + stride)
> start_lookup_level = L2;
> else
> start_lookup_level = L3;
> } else { // 4 KB granule
> grain_size = 12;
> stride = grain_size - 3;
> if (tsz > grain_size + 3 * stride)
> start_lookup_level = L0;
> else if (tsz > grain_size + 2 * stride)
> start_lookup_level = L1;
> else
> start_lookup_level = L2;
> }
>
> // Determine table base address
> int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) -
> grain_size;
> Addr base_addr = mbits(ttbr, 47, base_addr_lo);
>
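The start-level and base-address arithmetic above can also be checked in
isolation. Illustrative only, not part of the patch; the enum and helper
names are invented to avoid clashing with the walker's own L0..L3:

    enum A64Level { LVL0 = 0, LVL1 = 1, LVL2 = 2, LVL3 = 3 };

    constexpr int a64_start_level(int tsz, int grain_size)
    {
        // Each level resolves (grain_size - 3) bits of VA.
        return grain_size == 16
            ? (tsz > grain_size + 2 * (grain_size - 3) ? LVL1
             : tsz > grain_size + 1 * (grain_size - 3) ? LVL2 : LVL3)
            : (tsz > grain_size + 3 * (grain_size - 3) ? LVL0
             : tsz > grain_size + 2 * (grain_size - 3) ? LVL1 : LVL2);
    }

    constexpr int a64_base_addr_lo(int tsz, int grain_size, int start_level)
    {
        return 3 + tsz - (grain_size - 3) * (3 - start_level) - grain_size;
    }

    // A 39-bit VA space (T0SZ == 25) with a 4 KB granule starts the walk at
    // L1, the table base is TTBR[47:12] and the L1 index is VA[38:30].
    static_assert(a64_start_level(39, 12) == LVL1, "start level");
    static_assert(a64_base_addr_lo(39, 12, LVL1) == 12, "base address low bit");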
> // Determine physical address size and raise an Address Size Fault if
> // necessary
> int pa_range = decodePhysAddrRange64(ps);
> // Clamp to lower limit
> if (pa_range > physAddrRange)
> currState->physAddrRange = physAddrRange;
> else
> currState->physAddrRange = pa_range;
> if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
> DPRINTF(TLB, "Address size fault before any lookup\n");
> Fault f;
> if (currState->isFetch)
> f = new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::AddressSizeLL + start_lookup_level,
> isStage2,
> ArmFault::LpaeTran);
> else
> f = new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::AddressSizeLL + start_lookup_level,
> isStage2,
> ArmFault::LpaeTran);
>
>
> if (currState->timing) {
> pending = false;
> nextWalk(currState->tc);
> currState = NULL;
> } else {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return f;
>
> }
>
> // Determine descriptor address
> Addr desc_addr = base_addr |
> (bits(currState->vaddr, tsz - 1,
> stride * (3 - start_lookup_level) + grain_size) << 3);
>
> // Trickbox address check
> Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
> currState->vaddr, sizeof(uint64_t), currState->isFetch,
> currState->isWrite, TlbEntry::DomainType::NoAccess,
> start_lookup_level);
> if (f) {
> DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
> if (currState->timing) {
> pending = false;
> nextWalk(currState->tc);
> currState = NULL;
> } else {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return f;
> }
>
> Request::Flags flag = 0;
> if (currState->sctlr.c == 0) {
> flag = Request::UNCACHEABLE;
> }
>
> currState->longDesc.lookupLevel = start_lookup_level;
> currState->longDesc.aarch64 = true;
> currState->longDesc.largeGrain = large_grain;
> currState->longDesc.grainSize = grain_size;
>
295,296c828,847
< port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
< &doL1DescEvent, (uint8_t*)&currState->l1Desc.data,
---
> Event *event;
> switch (start_lookup_level) {
> case L0:
> event = (Event *) &doL0LongDescEvent;
> break;
> case L1:
> event = (Event *) &doL1LongDescEvent;
> break;
> case L2:
> event = (Event *) &doL2LongDescEvent;
> break;
> case L3:
> event = (Event *) &doL3LongDescEvent;
> break;
> default:
> panic("Invalid table lookup level");
> break;
> }
> port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
> (uint8_t*) &currState->longDesc.data,
298,301c849,852
< DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before "
< "adding: %d\n",
< stateQueueL1.size());
< stateQueueL1.push_back(currState);
---
> DPRINTF(TLBVerbose,
> "Adding to walker fifo: queue size before adding: %d\n",
> stateQueues[start_lookup_level].size());
> stateQueues[start_lookup_level].push_back(currState);
304,305c855,856
< port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
< NULL, (uint8_t*)&currState->l1Desc.data,
---
> port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
> NULL, (uint8_t*) &currState->longDesc.data,
307c858
< doL1Descriptor();
---
> doLongDescriptor();
310,311c861,862
< RequestPtr req = new Request(l1desc_addr, sizeof(uint32_t), flag, masterId);
< req->taskId(ContextSwitchTaskId::DMA);
---
> RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
> masterId);
313c864
< pkt->dataStatic((uint8_t*)&currState->l1Desc.data);
---
> pkt->dataStatic((uint8_t*) &currState->longDesc.data);
315c866
< doL1Descriptor();
---
> doLongDescriptor();
333c884
< bool outer_shareable = false;
---
> te.outerShareable = false;
338c889
< te.mtype = TlbEntry::StronglyOrdered;
---
> te.mtype = TlbEntry::MemoryType::StronglyOrdered;
345c896
< te.mtype = TlbEntry::Device;
---
> te.mtype = TlbEntry::MemoryType::Device;
351c902
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
357c908
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
364c915
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
376c927
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
383c934
< te.mtype = TlbEntry::Device;
---
> te.mtype = TlbEntry::MemoryType::Device;
392c943
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
404,405c955,958
< PRRR prrr = tc->readMiscReg(MISCREG_PRRR);
< NMRR nmrr = tc->readMiscReg(MISCREG_NMRR);
---
> PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
> currState->tc, !currState->isSecure));
> NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
> currState->tc, !currState->isSecure));
413c966
< outer_shareable = (prrr.nos0 == 0);
---
> te.outerShareable = (prrr.nos0 == 0);
419c972
< outer_shareable = (prrr.nos1 == 0);
---
> te.outerShareable = (prrr.nos1 == 0);
425c978
< outer_shareable = (prrr.nos2 == 0);
---
> te.outerShareable = (prrr.nos2 == 0);
431c984
< outer_shareable = (prrr.nos3 == 0);
---
> te.outerShareable = (prrr.nos3 == 0);
437c990
< outer_shareable = (prrr.nos4 == 0);
---
> te.outerShareable = (prrr.nos4 == 0);
443c996
< outer_shareable = (prrr.nos5 == 0);
---
> te.outerShareable = (prrr.nos5 == 0);
451c1004
< outer_shareable = (prrr.nos7 == 0);
---
> te.outerShareable = (prrr.nos7 == 0);
458c1011
< te.mtype = TlbEntry::StronglyOrdered;
---
> te.mtype = TlbEntry::MemoryType::StronglyOrdered;
467c1020
< te.mtype = TlbEntry::Device;
---
> te.mtype = TlbEntry::MemoryType::Device;
479c1032
< te.mtype = TlbEntry::Normal;
---
> te.mtype = TlbEntry::MemoryType::Normal;
489c1042
< if (te.mtype == TlbEntry::Normal){
---
> if (te.mtype == TlbEntry::MemoryType::Normal){
525a1079,1080
> te.setAttributes(false);
> }
527,550c1082,1086
< /** Formatting for Physical Address Register (PAR)
< * Only including lower bits (TLB info here)
< * PAR:
< * PA [31:12]
< * Reserved [11]
< * TLB info [10:1]
< * NOS [10] (Not Outer Sharable)
< * NS [9] (Non-Secure)
< * -- [8] (Implementation Defined)
< * SH [7] (Sharable)
< * Inner[6:4](Inner memory attributes)
< * Outer[3:2](Outer memory attributes)
< * SS [1] (SuperSection)
< * F [0] (Fault, Fault Status in [6:1] if faulted)
< */
< te.attributes = (
< ((outer_shareable ? 0:1) << 10) |
< // TODO: NS Bit
< ((te.shareable ? 1:0) << 7) |
< (te.innerAttrs << 4) |
< (te.outerAttrs << 2)
< // TODO: Supersection bit
< // TODO: Fault bit
< );
---
> void
> TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
> LongDescriptor &lDescriptor)
> {
> assert(_haveLPAE);
551a1088,1095
> uint8_t attr;
> uint8_t sh = lDescriptor.sh();
> // Different format and source of attributes if this is a stage 2
> // translation
> if (isStage2) {
> attr = lDescriptor.memAttr();
> uint8_t attr_3_2 = (attr >> 2) & 0x3;
> uint8_t attr_1_0 = attr & 0x3;
552a1097,1204
> DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
>
> if (attr_3_2 == 0) {
> te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
> : TlbEntry::MemoryType::Device;
> te.outerAttrs = 0;
> te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
> te.nonCacheable = true;
> } else {
> te.mtype = TlbEntry::MemoryType::Normal;
> te.outerAttrs = attr_3_2 == 1 ? 0 :
> attr_3_2 == 2 ? 2 : 1;
> te.innerAttrs = attr_1_0 == 1 ? 0 :
> attr_1_0 == 2 ? 6 : 5;
> te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
> }
> } else {
> uint8_t attrIndx = lDescriptor.attrIndx();
>
> // LPAE always uses remapping of memory attributes, irrespective of the
> // value of SCTLR.TRE
> int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
> reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
> uint32_t mair = currState->tc->readMiscReg(reg);
> attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
> uint8_t attr_7_4 = bits(attr, 7, 4);
> uint8_t attr_3_0 = bits(attr, 3, 0);
> DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
>
> // Note: the memory subsystem only cares about the 'cacheable' memory
> // attribute. The other attributes are only used to fill the PAR register
> // so as to provide the illusion of full support
> te.nonCacheable = false;
>
> switch (attr_7_4) {
> case 0x0:
> // Strongly-ordered or Device memory
> if (attr_3_0 == 0x0)
> te.mtype = TlbEntry::MemoryType::StronglyOrdered;
> else if (attr_3_0 == 0x4)
> te.mtype = TlbEntry::MemoryType::Device;
> else
> panic("Unpredictable behavior\n");
> te.nonCacheable = true;
> te.outerAttrs = 0;
> break;
> case 0x4:
> // Normal memory, Outer Non-cacheable
> te.mtype = TlbEntry::MemoryType::Normal;
> te.outerAttrs = 0;
> if (attr_3_0 == 0x4)
> // Inner Non-cacheable
> te.nonCacheable = true;
> else if (attr_3_0 < 0x8)
> panic("Unpredictable behavior\n");
> break;
> case 0x8:
> case 0x9:
> case 0xa:
> case 0xb:
> case 0xc:
> case 0xd:
> case 0xe:
> case 0xf:
> if (attr_7_4 & 0x4) {
> te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
> } else {
> te.outerAttrs = 0x2;
> }
> // Normal memory, Outer Cacheable
> te.mtype = TlbEntry::MemoryType::Normal;
> if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
> panic("Unpredictable behavior\n");
> break;
> default:
> panic("Unpredictable behavior\n");
> break;
> }
>
> switch (attr_3_0) {
> case 0x0:
> te.innerAttrs = 0x1;
> break;
> case 0x4:
> te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
> break;
> case 0x8:
> case 0x9:
> case 0xA:
> case 0xB:
> te.innerAttrs = 6;
> break;
> case 0xC:
> case 0xD:
> case 0xE:
> case 0xF:
> te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
> break;
> default:
> panic("Unpredictable behavior\n");
> break;
> }
> }
>
> te.outerShareable = sh == 2;
> te.shareable = (sh & 0x2) ? true : false;
> te.setAttributes(true);
> te.attributes |= (uint64_t) attr << 56;
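For the non-stage-2 path above, the AttrIndx-to-MAIR mapping is worth a
worked example; the following is illustrative only, not part of the patch,
with invented helper names and register values:

    #include <cstdint>

    constexpr uint8_t lpae_mair_attr(uint32_t mair0, uint32_t mair1,
                                     unsigned attr_indx)
    {
        // AttrIndx[2] picks MAIR0 vs MAIR1; AttrIndx[1:0] picks the byte.
        return ((attr_indx & 0x4 ? mair1 : mair0)
                >> (8 * (attr_indx % 4))) & 0xff;
    }

    // AttrIndx == 5 reads MAIR1[15:8]; AttrIndx == 2 reads MAIR0[23:16].
    static_assert(lpae_mair_attr(0x00ff0000, 0x0000bb00, 5) == 0xbb, "MAIR1");
    static_assert(lpae_mair_attr(0x00ff0000, 0x0000bb00, 2) == 0xff, "MAIR0");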
555a1208,1256
> TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
> uint8_t sh)
> {
> DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
>
> // Select MAIR
> uint64_t mair;
> switch (currState->el) {
> case EL0:
> case EL1:
> mair = tc->readMiscReg(MISCREG_MAIR_EL1);
> break;
> case EL2:
> mair = tc->readMiscReg(MISCREG_MAIR_EL2);
> break;
> case EL3:
> mair = tc->readMiscReg(MISCREG_MAIR_EL3);
> break;
> default:
> panic("Invalid exception level");
> break;
> }
>
> // Select attributes
> uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
> uint8_t attr_lo = bits(attr, 3, 0);
> uint8_t attr_hi = bits(attr, 7, 4);
>
> // Memory type
> te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
>
> // Cacheability
> te.nonCacheable = false;
> if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
> attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
> attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
> te.nonCacheable = true;
> }
>
> te.shareable = sh == 2;
> te.outerShareable = (sh & 0x2) ? true : false;
> // Attributes formatted according to the 64-bit PAR
> te.attributes = ((uint64_t) attr << 56) |
> (1 << 11) | // LPAE bit
> (te.ns << 9) | // NS bit
> (sh << 7);
> }
>
> void
557a1259,1262
> if (currState->fault != NoFault) {
> return;
> }
>
559c1264
< currState->vaddr, currState->l1Desc.data);
---
> currState->vaddr_tainted, currState->l1Desc.data);
572c1277,1280
< new PrefetchAbort(currState->vaddr, ArmFault::Translation0);
---
> new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L1,
> isStage2,
> ArmFault::VmsaTran);
575,576c1283,1287
< new DataAbort(currState->vaddr, 0, currState->isWrite,
< ArmFault::Translation0);
---
> new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::TranslationLL + L1, isStage2,
> ArmFault::VmsaTran);
585,587c1296,1301
< currState->fault = new DataAbort(currState->vaddr,
< currState->l1Desc.domain(), currState->isWrite,
< ArmFault::AccessFlag0);
---
> currState->fault = new DataAbort(currState->vaddr_tainted,
> currState->l1Desc.domain(),
> currState->isWrite,
> ArmFault::AccessFlagLL + L1,
> isStage2,
> ArmFault::VmsaTran);
592,604c1306,1314
< te.N = 20;
< te.pfn = currState->l1Desc.pfn();
< te.size = (1<<te.N) - 1;
< te.global = !currState->l1Desc.global();
< te.valid = true;
< te.vpn = currState->vaddr >> te.N;
< te.sNp = true;
< te.xn = currState->l1Desc.xn();
< te.ap = currState->l1Desc.ap();
< te.domain = currState->l1Desc.domain();
< te.asid = currState->contextId;
< memAttrs(currState->tc, te, currState->sctlr,
< currState->l1Desc.texcb(), currState->l1Desc.shareable());
---
> insertTableEntry(currState->l1Desc, false);
> return;
> case L1Descriptor::PageTable:
> {
> Addr l2desc_addr;
> l2desc_addr = currState->l1Desc.l2Addr() |
> (bits(currState->vaddr, 19, 12) << 2);
> DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
> l2desc_addr, currState->isSecure ? "s" : "ns");
606,614c1316,1320
< DPRINTF(TLB, "Inserting Section Descriptor into TLB\n");
< DPRINTF(TLB, " - N:%d pfn:%#x size: %#x global:%d valid: %d\n",
< te.N, te.pfn, te.size, te.global, te.valid);
< DPRINTF(TLB, " - vpn:%#x sNp: %d xn:%d ap:%d domain: %d asid:%d nc:%d\n",
< te.vpn, te.sNp, te.xn, te.ap, te.domain, te.asid,
< te.nonCacheable);
< DPRINTF(TLB, " - domain from l1 desc: %d data: %#x bits:%d\n",
< currState->l1Desc.domain(), currState->l1Desc.data,
< (currState->l1Desc.data >> 5) & 0xF );
---
> // Trickbox address check
> currState->fault = tlb->walkTrickBoxCheck(
> l2desc_addr, currState->isSecure, currState->vaddr,
> sizeof(uint32_t), currState->isFetch, currState->isWrite,
> currState->l1Desc.domain(), L2);
615a1322,1383
> if (currState->fault) {
> if (!currState->timing) {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return;
> }
>
> Request::Flags flag = 0;
> if (currState->isSecure)
> flag.set(Request::SECURE);
>
> bool delayed;
> delayed = fetchDescriptor(l2desc_addr,
> (uint8_t*)&currState->l2Desc.data,
> sizeof(uint32_t), flag, -1, &doL2DescEvent,
> &TableWalker::doL2Descriptor);
> if (delayed) {
> currState->delayed = true;
> }
>
> return;
> }
> default:
> panic("A new type in a 2 bit field?\n");
> }
> }
>
> void
> TableWalker::doLongDescriptor()
> {
> if (currState->fault != NoFault) {
> return;
> }
>
> DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
> currState->longDesc.lookupLevel, currState->vaddr_tainted,
> currState->longDesc.data,
> currState->aarch64 ? "AArch64" : "long-desc.");
>
> if ((currState->longDesc.type() == LongDescriptor::Block) ||
> (currState->longDesc.type() == LongDescriptor::Page)) {
> DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
> "xn: %d, ap: %d, af: %d, type: %d\n",
> currState->longDesc.lookupLevel,
> currState->longDesc.data,
> currState->longDesc.pxn(),
> currState->longDesc.xn(),
> currState->longDesc.ap(),
> currState->longDesc.af(),
> currState->longDesc.type());
> } else {
> DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
> currState->longDesc.lookupLevel,
> currState->longDesc.data,
> currState->longDesc.type());
> }
>
> TlbEntry te;
>
> switch (currState->longDesc.type()) {
> case LongDescriptor::Invalid:
620d1387
< tlb->insert(currState->vaddr, te);
621a1389,1405
> DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
> currState->longDesc.lookupLevel,
> ArmFault::TranslationLL + currState->longDesc.lookupLevel);
> if (currState->isFetch)
> currState->fault = new PrefetchAbort(
> currState->vaddr_tainted,
> ArmFault::TranslationLL + currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
> else
> currState->fault = new DataAbort(
> currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess,
> currState->isWrite,
> ArmFault::TranslationLL + currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
623,628c1407,1460
< case L1Descriptor::PageTable:
< Addr l2desc_addr;
< l2desc_addr = currState->l1Desc.l2Addr() |
< (bits(currState->vaddr, 19,12) << 2);
< DPRINTF(TLB, "L1 descriptor points to page table at: %#x\n",
< l2desc_addr);
---
> case LongDescriptor::Block:
> case LongDescriptor::Page:
> {
> bool fault = false;
> bool aff = false;
> // Check for address size fault
> if (checkAddrSizeFaultAArch64(
> mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
> currState->longDesc.offsetBits()),
> currState->physAddrRange)) {
> fault = true;
> DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
> currState->longDesc.lookupLevel);
> // Check for access fault
> } else if (currState->longDesc.af() == 0) {
> fault = true;
> DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
> currState->longDesc.lookupLevel);
> aff = true;
> }
> if (fault) {
> if (currState->isFetch)
> currState->fault = new PrefetchAbort(
> currState->vaddr_tainted,
> (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
> currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
> else
> currState->fault = new DataAbort(
> currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess, currState->isWrite,
> (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
> currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
> } else {
> insertTableEntry(currState->longDesc, true);
> }
> }
> return;
> case LongDescriptor::Table:
> {
> // Set hierarchical permission flags
> currState->secureLookup = currState->secureLookup &&
> currState->longDesc.secureTable();
> currState->rwTable = currState->rwTable &&
> currState->longDesc.rwTable();
> currState->userTable = currState->userTable &&
> currState->longDesc.userTable();
> currState->xnTable = currState->xnTable ||
> currState->longDesc.xnTable();
> currState->pxnTable = currState->pxnTable ||
> currState->longDesc.pxnTable();
630,633c1462,1464
< // Trickbox address check
< currState->fault = tlb->walkTrickBoxCheck(l2desc_addr, currState->vaddr,
< sizeof(uint32_t), currState->isFetch, currState->isWrite,
< currState->l1Desc.domain(), false);
---
> // Set up next level lookup
> Addr next_desc_addr = currState->longDesc.nextDescAddr(
> currState->vaddr);
635,638c1466,1492
< if (currState->fault) {
< if (!currState->timing) {
< currState->tc = NULL;
< currState->req = NULL;
---
> DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
> currState->longDesc.lookupLevel,
> currState->longDesc.lookupLevel + 1,
> next_desc_addr,
> currState->secureLookup ? "s" : "ns");
>
> // Check for address size fault
> if (currState->aarch64 && checkAddrSizeFaultAArch64(
> next_desc_addr, currState->physAddrRange)) {
> DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
> currState->longDesc.lookupLevel);
> if (currState->isFetch)
> currState->fault = new PrefetchAbort(
> currState->vaddr_tainted,
> ArmFault::AddressSizeLL
> + currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
> else
> currState->fault = new DataAbort(
> currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess, currState->isWrite,
> ArmFault::AddressSizeLL
> + currState->longDesc.lookupLevel,
> isStage2,
> ArmFault::LpaeTran);
> return;
640,641d1493
< return;
< }
642a1495,1501
> // Trickbox address check
> currState->fault = tlb->walkTrickBoxCheck(
> next_desc_addr, currState->secureLookup,
> currState->vaddr, sizeof(uint64_t),
> currState->isFetch, currState->isWrite,
> TlbEntry::DomainType::Client,
> toLookupLevel(currState->longDesc.lookupLevel +1));
644,663c1503,1540
< if (currState->timing) {
< currState->delayed = true;
< port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
< &doL2DescEvent, (uint8_t*)&currState->l2Desc.data,
< currState->tc->getCpuPtr()->clockPeriod());
< } else if (!currState->functional) {
< port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
< NULL, (uint8_t*)&currState->l2Desc.data,
< currState->tc->getCpuPtr()->clockPeriod());
< doL2Descriptor();
< } else {
< RequestPtr req = new Request(l2desc_addr, sizeof(uint32_t), 0,
< masterId);
< req->taskId(ContextSwitchTaskId::DMA);
< PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
< pkt->dataStatic((uint8_t*)&currState->l2Desc.data);
< port.sendFunctional(pkt);
< doL2Descriptor();
< delete req;
< delete pkt;
---
> if (currState->fault) {
> if (!currState->timing) {
> currState->tc = NULL;
> currState->req = NULL;
> }
> return;
> }
>
> Request::Flags flag = 0;
> if (currState->secureLookup)
> flag.set(Request::SECURE);
>
> currState->longDesc.lookupLevel =
> (LookupLevel) (currState->longDesc.lookupLevel + 1);
> Event *event = NULL;
> switch (currState->longDesc.lookupLevel) {
> case L1:
> assert(currState->aarch64);
> event = &doL1LongDescEvent;
> break;
> case L2:
> event = &doL2LongDescEvent;
> break;
> case L3:
> event = &doL3LongDescEvent;
> break;
> default:
> panic("Wrong lookup level in table walk\n");
> break;
> }
>
> bool delayed;
> delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
> sizeof(uint64_t), flag, -1, event,
> &TableWalker::doLongDescriptor);
> if (delayed) {
> currState->delayed = true;
> }
673a1551,1554
> if (currState->fault != NoFault) {
> return;
> }
>
675c1556
< currState->vaddr, currState->l2Desc.data);
---
> currState->vaddr_tainted, currState->l2Desc.data);
686c1567,1570
< new PrefetchAbort(currState->vaddr, ArmFault::Translation1);
---
> new PrefetchAbort(currState->vaddr_tainted,
> ArmFault::TranslationLL + L2,
> isStage2,
> ArmFault::VmsaTran);
689,690c1573,1576
< new DataAbort(currState->vaddr, currState->l1Desc.domain(),
< currState->isWrite, ArmFault::Translation1);
---
> new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
> currState->isWrite, ArmFault::TranslationLL + L2,
> isStage2,
> ArmFault::VmsaTran);
697a1584,1585
> DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
> currState->sctlr.afe, currState->l2Desc.ap());
700,702c1588,1591
< new DataAbort(currState->vaddr, 0, currState->isWrite,
< ArmFault::AccessFlag1);
<
---
> new DataAbort(currState->vaddr_tainted,
> TlbEntry::DomainType::NoAccess, currState->isWrite,
> ArmFault::AccessFlagLL + L2, isStage2,
> ArmFault::VmsaTran);
705,729c1594
< if (currState->l2Desc.large()) {
< te.N = 16;
< te.pfn = currState->l2Desc.pfn();
< } else {
< te.N = 12;
< te.pfn = currState->l2Desc.pfn();
< }
<
< te.valid = true;
< te.size = (1 << te.N) - 1;
< te.asid = currState->contextId;
< te.sNp = false;
< te.vpn = currState->vaddr >> te.N;
< te.global = currState->l2Desc.global();
< te.xn = currState->l2Desc.xn();
< te.ap = currState->l2Desc.ap();
< te.domain = currState->l1Desc.domain();
< memAttrs(currState->tc, te, currState->sctlr, currState->l2Desc.texcb(),
< currState->l2Desc.shareable());
<
< if (!currState->timing) {
< currState->tc = NULL;
< currState->req = NULL;
< }
< tlb->insert(currState->vaddr, te);
---
> insertTableEntry(currState->l2Desc, false);
735c1600
< currState = stateQueueL1.front();
---
> currState = stateQueues[L1].front();
736a1602,1606
> // if there's a stage2 translation object we don't need it any more
> if (currState->stage2Tran) {
> delete currState->stage2Tran;
> currState->stage2Tran = NULL;
> }
737a1608
>
741c1612
< DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr);
---
> DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
744c1615
< stateQueueL1.pop_front();
---
> stateQueues[L1].pop_front();
761,763c1632,1637
< DPRINTF(TLBVerbose, "calling translateTiming again\n");
< currState->fault = tlb->translateTiming(currState->req, currState->tc,
< currState->transState, currState->mode);
---
> // Don't finish the translation if a stage 2 lookup is underway
> if (!currState->doingStage2) {
> DPRINTF(TLBVerbose, "calling translateTiming again\n");
> currState->fault = tlb->translateTiming(currState->req, currState->tc,
> currState->transState, currState->mode);
> }
774c1648
< stateQueueL2.push_back(currState);
---
> stateQueues[L2].push_back(currState);
782c1656
< currState = stateQueueL2.front();
---
> currState = stateQueues[L2].front();
783a1658,1662
> // if there's a stage2 translation object we don't need it any more
> if (currState->stage2Tran) {
> delete currState->stage2Tran;
> currState->stage2Tran = NULL;
> }
786c1665
< currState->vaddr);
---
> currState->vaddr_tainted);
795,797c1674,1679
< DPRINTF(TLBVerbose, "calling translateTiming again\n");
< currState->fault = tlb->translateTiming(currState->req, currState->tc,
< currState->transState, currState->mode);
---
> // Don't finish the translation if a stage 2 lookup is underway
> if (!currState->doingStage2) {
> DPRINTF(TLBVerbose, "calling translateTiming again\n");
> currState->fault = tlb->translateTiming(currState->req,
> currState->tc, currState->transState, currState->mode);
> }
801c1683
< stateQueueL2.pop_front();
---
> stateQueues[L2].pop_front();
814a1697,1778
> TableWalker::doL0LongDescriptorWrapper()
> {
> doLongDescriptorWrapper(L0);
> }
>
> void
> TableWalker::doL1LongDescriptorWrapper()
> {
> doLongDescriptorWrapper(L1);
> }
>
> void
> TableWalker::doL2LongDescriptorWrapper()
> {
> doLongDescriptorWrapper(L2);
> }
>
> void
> TableWalker::doL3LongDescriptorWrapper()
> {
> doLongDescriptorWrapper(L3);
> }
>
> void
> TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
> {
> currState = stateQueues[curr_lookup_level].front();
> assert(curr_lookup_level == currState->longDesc.lookupLevel);
> currState->delayed = false;
>
> // if there's a stage2 translation object we don't need it any more
> if (currState->stage2Tran) {
> delete currState->stage2Tran;
> currState->stage2Tran = NULL;
> }
>
> DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
> currState->vaddr_tainted);
> doLongDescriptor();
>
> stateQueues[curr_lookup_level].pop_front();
>
> if (currState->fault != NoFault) {
> // A fault was generated
> currState->transState->finish(currState->fault, currState->req,
> currState->tc, currState->mode);
>
> pending = false;
> nextWalk(currState->tc);
>
> currState->req = NULL;
> currState->tc = NULL;
> currState->delayed = false;
> delete currState;
> } else if (!currState->delayed) {
> // No additional lookups required
> // Don't finish the translation if a stage 2 lookup is underway
> if (!currState->doingStage2) {
> DPRINTF(TLBVerbose, "calling translateTiming again\n");
> currState->fault = tlb->translateTiming(currState->req, currState->tc,
> currState->transState,
> currState->mode);
> }
>
> pending = false;
> nextWalk(currState->tc);
>
> currState->req = NULL;
> currState->tc = NULL;
> currState->delayed = false;
> delete currState;
> } else {
> if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
> panic("Max. number of lookups already reached in table walk\n");
> // Need to perform additional lookups
> stateQueues[currState->longDesc.lookupLevel].push_back(currState);
> }
> currState = NULL;
> }
>
>
> void
820a1785,1790
> bool
> TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
> Request::Flags flags, int queueIndex, Event *event,
> void (TableWalker::*doDescriptor)())
> {
> bool isTiming = currState->timing;
821a1792,1796
> // Check whether the requests for the page table descriptors have to go
> // through the second stage MMU
> if (currState->stage2Req) {
> Fault fault;
> flags = flags | TLB::MustBeOne;
822a1798,1925
> if (isTiming) {
> Stage2MMU::Stage2Translation *tran = new
> Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
> currState->vaddr);
> currState->stage2Tran = tran;
> stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
> flags, masterId);
> fault = tran->fault;
> } else {
> fault = stage2Mmu->readDataUntimed(currState->tc,
> currState->vaddr, descAddr, data, numBytes, flags, masterId,
> currState->functional);
> }
>
> if (fault != NoFault) {
> currState->fault = fault;
> }
> if (isTiming) {
> if (queueIndex >= 0) {
> DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
> stateQueues[queueIndex].size());
> stateQueues[queueIndex].push_back(currState);
> currState = NULL;
> }
> } else {
> (this->*doDescriptor)();
> }
> } else {
> if (isTiming) {
> port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
> currState->tc->getCpuPtr()->clockPeriod(), flags);
> if (queueIndex >= 0) {
> DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
> stateQueues[queueIndex].size());
> stateQueues[queueIndex].push_back(currState);
> currState = NULL;
> }
> } else if (!currState->functional) {
> port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
> currState->tc->getCpuPtr()->clockPeriod(), flags);
> (this->*doDescriptor)();
> } else {
> RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
> req->taskId(ContextSwitchTaskId::DMA);
> PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
> pkt->dataStatic(data);
> port.sendFunctional(pkt);
> (this->*doDescriptor)();
> delete req;
> delete pkt;
> }
> }
> return (isTiming);
> }
>
> void
> TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
> {
> TlbEntry te;
>
> // Create and fill a new page table entry
> te.valid = true;
> te.longDescFormat = longDescriptor;
> te.isHyp = currState->isHyp;
> te.asid = currState->asid;
> te.vmid = currState->vmid;
> te.N = descriptor.offsetBits();
> te.vpn = currState->vaddr >> te.N;
> te.size = (1<<te.N) - 1;
> te.pfn = descriptor.pfn();
> te.domain = descriptor.domain();
> te.lookupLevel = descriptor.lookupLevel;
> te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
> te.nstid = !currState->isSecure;
> te.xn = descriptor.xn();
> if (currState->aarch64)
> te.el = currState->el;
> else
> te.el = 1;
>
> // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
> // as global
> te.global = descriptor.global(currState) || isStage2;
> if (longDescriptor) {
> LongDescriptor lDescriptor =
> dynamic_cast<LongDescriptor &>(descriptor);
>
> te.xn |= currState->xnTable;
> te.pxn = currState->pxnTable || lDescriptor.pxn();
> if (isStage2) {
> // this is actually the HAP field, but it's stored in the same bit
> // positions as the AP field in a stage 1 translation.
> te.hap = lDescriptor.ap();
> } else {
> te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
> (currState->userTable && (descriptor.ap() & 0x1));
> }
> if (currState->aarch64)
> memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
> currState->longDesc.sh());
> else
> memAttrsLPAE(currState->tc, te, lDescriptor);
> } else {
> te.ap = descriptor.ap();
> memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
> descriptor.shareable());
> }
>
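The stage 1 long-descriptor AP combination above folds the hierarchical
rwTable/userTable state into the leaf descriptor's 2-bit AP field. A worked
restatement; illustrative only, not part of the patch:

    // Upper bit (no-write) is forced if any level's table entry removed
    // write access; lower bit (EL0 access) survives only if every level
    // allowed it.
    constexpr unsigned combined_ap(bool rw_table, bool user_table,
                                   unsigned desc_ap)
    {
        return ((!rw_table || (desc_ap >> 1)) << 1)
             | (user_table && (desc_ap & 0x1));
    }

    // Leaf says writable and EL0-accessible (ap() == 0b01), but an upper
    // level removed write access: the combined AP is 0b11, i.e. read-only
    // but still EL0-accessible.
    static_assert(combined_ap(false, true, 0x1) == 0x3, "worked example");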
> // Debug output
> DPRINTF(TLB, descriptor.dbgHeader().c_str());
> DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
> te.N, te.pfn, te.size, te.global, te.valid);
> DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
> "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
> te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
> te.nonCacheable, te.ns);
> DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
> descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
> descriptor.getRawData());
>
> // Insert the entry into the TLB
> tlb->insert(currState->vaddr, te);
> if (!currState->timing) {
> currState->tc = NULL;
> currState->req = NULL;
> }
> }
>
828a1932,1945
> LookupLevel
> TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
> {
> switch (lookup_level_as_int) {
> case L1:
> return L1;
> case L2:
> return L2;
> case L3:
> return L3;
> default:
> panic("Invalid lookup level conversion");
> }
> }