2c2
< * Copyright (c) 2010-2012 ARM Limited
---
> * Copyright (c) 2010-2013 ARM Limited
51a52,53
> #include "arch/arm/stage2_lookup.hh"
> #include "arch/arm/stage2_mmu.hh"
70,72c72,76
< TLB::TLB(const Params *p)
< : BaseTLB(p), size(p->size) , tableWalker(p->walker),
< rangeMRU(1), bootUncacheability(false), miscRegValid(false)
---
> TLB::TLB(const ArmTLBParams *p)
> : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
> isStage2(p->is_stage2), tableWalker(p->walker), stage2Tlb(NULL),
> stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
> miscRegValid(false), curTranType(NormalTran)
74,76d77
< table = new TlbEntry[size];
< memset(table, 0, sizeof(TlbEntry) * size);
<
77a79,83
>
> // Cache system-level properties
> haveLPAE = tableWalker->haveLPAE();
> haveVirtualization = tableWalker->haveVirtualization();
> haveLargeAsid64 = tableWalker->haveLargeAsid64();
82,83c88
< if (table)
< delete [] table;
---
> delete[] table;
85a91,104
> void
> TLB::init()
> {
> if (stage2Mmu && !isStage2)
> stage2Tlb = stage2Mmu->stage2Tlb();
> }
>
> void
> TLB::setMMU(Stage2MMU *m)
> {
> stage2Mmu = m;
> tableWalker->setMMU(m);
> }
>
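Note on ordering: init() above resolves the stage 2 TLB through the MMU pointer, so setMMU() must run first. A minimal wiring sketch (hypothetical object names, assuming only the accessors shown above):

    // tlb and mmu are assumed pre-constructed simulation objects.
    tlb->setMMU(mmu);   // also forwards the MMU to the table walker
    tlb->init();        // caches stage2Tlb = mmu->stage2Tlb()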
89,91c108,116
< if (!miscRegValid)
< updateMiscReg(tc);
< TlbEntry *e = lookup(va, contextId, true);
---
> updateMiscReg(tc);
>
> if (directToStage2) {
> assert(stage2Tlb);
> return stage2Tlb->translateFunctional(tc, va, pa);
> }
>
> TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
> aarch64 ? aarch64EL : EL1);
105c130,131
< TLB::lookup(Addr va, uint8_t cid, bool functional)
---
> TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
> bool functional, bool ignore_asn, uint8_t target_el)
110,111c136
< // Maitaining LRU array
<
---
> // Maintaining LRU array
114,116c139,143
< if (table[x].match(va, cid)) {
<
< // We only move the hit entry ahead when the position is higher than rangeMRU
---
> if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
> target_el)) ||
> (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
> // We only move the hit entry ahead when the position is higher
> // than rangeMRU
120c147
< table[i] = table[i-1];
---
> table[i] = table[i - 1];
128c155
< x++;
---
> ++x;
131,135c158,167
< DPRINTF(TLBVerbose, "Lookup %#x, cid %#x -> %s ppn %#x size: %#x pa: %#x ap:%d\n",
< va, cid, retval ? "hit" : "miss", retval ? retval->pfn : 0,
< retval ? retval->size : 0, retval ? retval->pAddr(va) : 0,
< retval ? retval->ap : 0);
< ;
---
> DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
> "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
> "el: %d\n",
> va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
> retval ? retval->pfn : 0, retval ? retval->size : 0,
> retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
> retval ? retval->ns : 0, retval ? retval->nstid : 0,
> retval ? retval->global : 0, retval ? retval->asid : 0,
> retval ? retval->el : 0);
>
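The hit path above implements a cheap range-MRU policy rather than a full LRU stack: a hit at an index deeper than rangeMRU shifts the intervening entries down so hot entries cluster at the front. A stand-alone sketch of that promotion (illustrative only; it assumes the hit entry lands in slot 0, with a generic entry type):

    #include <vector>

    // Mirrors the promotion in TLB::lookup: positions <= rangeMRU are
    // already considered recent; only deeper hits rotate to the front.
    template <typename Entry>
    void promote(std::vector<Entry> &table, int x, int rangeMRU)
    {
        if (x <= rangeMRU)
            return;
        Entry hit = table[x];
        for (int i = x; i > 0; --i)
            table[i] = table[i - 1];
        table[0] = hit;
    }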
144,147c176,181
< " asid:%d N:%d global:%d valid:%d nc:%d sNp:%d xn:%d ap:%#x"
< " domain:%#x\n", entry.pfn, entry.size, entry.vpn, entry.asid,
< entry.N, entry.global, entry.valid, entry.nonCacheable, entry.sNp,
< entry.xn, entry.ap, entry.domain);
---
> " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
> " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
> entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
> entry.global, entry.valid, entry.nonCacheable, entry.xn,
> entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
> entry.isHyp);
149,150c183,185
< if (table[size-1].valid)
< DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d ppn %#x size: %#x ap:%d\n",
---
> if (table[size - 1].valid)
> DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
> "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
152,153c187,190
< table[size-1].pfn << table[size-1].N, table[size-1].size,
< table[size-1].ap);
---
> table[size-1].vmid, table[size-1].pfn << table[size-1].N,
> table[size-1].size, table[size-1].ap, table[size-1].ns,
> table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
> table[size-1].el);
157,158c194,195
< for(int i = size-1; i > 0; i--)
< table[i] = table[i-1];
---
> for (int i = size - 1; i > 0; --i)
> table[i] = table[i - 1];
165c202
< TLB::printTlb()
---
> TLB::printTlb() const
171,175c208,211
< te = &table[x];
< if (te->valid)
< DPRINTF(TLB, " * %#x, asn %d ppn %#x size: %#x ap:%d\n",
< te->vpn << te->N, te->asid, te->pfn << te->N, te->size, te->ap);
< x++;
---
> te = &table[x];
> if (te->valid)
> DPRINTF(TLB, " * %s\n", te->print());
> ++x;
179d214
<
181c216
< TLB::flushAll()
---
> TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
183c218,219
< DPRINTF(TLB, "Flushing all TLB entries\n");
---
> DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
> (secure_lookup ? "secure" : "non-secure"));
187,193c223,232
< te = &table[x];
< if (te->valid) {
< DPRINTF(TLB, " - %#x, asn %d ppn %#x size: %#x ap:%d\n",
< te->vpn << te->N, te->asid, te->pfn << te->N, te->size, te->ap);
< flushedEntries++;
< }
< x++;
---
> te = &table[x];
> if (te->valid && secure_lookup == !te->nstid &&
> (te->vmid == vmid || secure_lookup) &&
> checkELMatch(target_el, te->el, ignore_el)) {
>
> DPRINTF(TLB, " - %s\n", te->print());
> te->valid = false;
> flushedEntries++;
> }
> ++x;
196,197d234
< memset(table, 0, sizeof(TlbEntry) * size);
<
198a236,241
>
> // If there's a second stage TLB (and we're not it) then flush it as well
> // if we're currently in hyp mode
> if (!isStage2 && isHyp) {
> stage2Tlb->flushAllSecurity(secure_lookup, true);
> }
201d243
<
203c245
< TLB::flushMvaAsid(Addr mva, uint64_t asn)
---
> TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
205c247,249
< DPRINTF(TLB, "Flushing mva %#x asid: %#x\n", mva, asn);
---
> DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
> (hyp ? "hyp" : "non-hyp"));
> int x = 0;
206a251,254
> while (x < size) {
> te = &table[x];
> if (te->valid && te->nstid && te->isHyp == hyp &&
> checkELMatch(target_el, te->el, ignore_el)) {
208,214c256,260
< te = lookup(mva, asn);
< while (te != NULL) {
< DPRINTF(TLB, " - %#x, asn %d ppn %#x size: %#x ap:%d\n",
< te->vpn << te->N, te->asid, te->pfn << te->N, te->size, te->ap);
< te->valid = false;
< flushedEntries++;
< te = lookup(mva,asn);
---
> DPRINTF(TLB, " - %s\n", te->print());
> flushedEntries++;
> te->valid = false;
> }
> ++x;
215a262,277
>
> flushTlb++;
>
> // If there's a second stage TLB (and we're not it) then flush it as well
> if (!isStage2 && !hyp) {
> stage2Tlb->flushAllNs(false, true);
> }
> }
>
> void
> TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
> {
> DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
> "(%s lookup)\n", mva, asn, (secure_lookup ?
> "secure" : "non-secure"));
> _flushMva(mva, asn, secure_lookup, false, false, target_el);
220c282
< TLB::flushAsid(uint64_t asn)
---
> TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
222c284,285
< DPRINTF(TLB, "Flushing all entries with asid: %#x\n", asn);
---
> DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
> (secure_lookup ? "secure" : "non-secure"));
224c287
< int x = 0;
---
> int x = 0;
229c292,295
< if (te->asid == asn) {
---
> if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
> (te->vmid == vmid || secure_lookup) &&
> checkELMatch(target_el, te->el, false)) {
>
231,232c297
< DPRINTF(TLB, " - %#x, asn %d ppn %#x size: %#x ap:%d\n",
< te->vpn << te->N, te->asid, te->pfn << te->N, te->size, te->ap);
---
> DPRINTF(TLB, " - %s\n", te->print());
235c300
< x++;
---
> ++x;
241c306
< TLB::flushMva(Addr mva)
---
> TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
243c308,312
< DPRINTF(TLB, "Flushing all entries with mva: %#x\n", mva);
---
> DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
> (secure_lookup ? "secure" : "non-secure"));
> _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
> flushTlbMva++;
> }
245c314,317
< int x = 0;
---
> void
> TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
> bool ignore_asn, uint8_t target_el)
> {
247,251c319,325
<
< while (x < size) {
< te = &table[x];
< Addr v = te->vpn << te->N;
< if (mva >= v && mva < v + te->size) {
---
> // D5.7.2: Sign-extend address to 64 bits
> mva = sext<56>(mva);
> te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
> target_el);
> while (te != NULL) {
> if (secure_lookup == !te->nstid) {
> DPRINTF(TLB, " - %s\n", te->print());
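The sext<56> call matters because TLBI-by-VA operands carry at most 56 significant VA bits (ARM ARM D5.7.2): upper-half addresses must have bit 55 replicated upward before they can match the tags stored in the TLB. A self-contained sketch of the sign extension (a hypothetical reimplementation, not gem5's base/bitfield.hh version):

    #include <cassert>
    #include <cstdint>

    // Treat bit N-1 as the sign bit and replicate it into bits N..63.
    template <int N>
    uint64_t sext(uint64_t v)
    {
        const uint64_t sign = 1ULL << (N - 1);
        return (v ^ sign) - sign;
    }

    int main()
    {
        assert(sext<56>(0x00ffffffffffff00ULL) == 0xffffffffffffff00ULL);
        assert(sext<56>(0x0000000000001000ULL) == 0x1000ULL);
    }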
253,254d326
< DPRINTF(TLB, " - %#x, asn %d ppn %#x size: %#x ap:%d\n",
< te->vpn << te->N, te->asid, te->pfn << te->N, te->size, te->ap);
257c329,330
< x++;
---
> te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
> target_el);
259d331
< flushTlbMva++;
261a334,347
> bool
> TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
> {
> bool elMatch = true;
> if (!ignore_el) {
> if (target_el == 2 || target_el == 3) {
> elMatch = (tentry_el == target_el);
> } else {
> elMatch = (tentry_el == 0) || (tentry_el == 1);
> }
> }
> return elMatch;
> }
>
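checkELMatch encodes the regime split used by all the flush routines: EL2 and EL3 entries only match a flush aimed at exactly that level, while EL0 and EL1 share a stage 1 translation regime and match either target. A free-function restatement with its truth table (illustrative only):

    #include <cassert>
    #include <cstdint>

    bool elMatch(uint8_t target_el, uint8_t entry_el, bool ignore_el)
    {
        if (ignore_el)
            return true;                       // flush regardless of EL
        if (target_el == 2 || target_el == 3)
            return entry_el == target_el;      // exact match only
        return entry_el == 0 || entry_el == 1; // EL0/EL1 share a regime
    }

    int main()
    {
        assert(elMatch(1, 0, false));   // EL0 entry, EL1-targeted flush
        assert(!elMatch(2, 1, false));  // EL1 entry survives an EL2 flush
        assert(elMatch(2, 1, true));    // ignore_el overrides the check
    }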
275a362,365
> SERIALIZE_SCALAR(haveLPAE);
> SERIALIZE_SCALAR(directToStage2);
> SERIALIZE_SCALAR(stage2Req);
> SERIALIZE_SCALAR(bootUncacheability);
290a381,385
> UNSERIALIZE_SCALAR(haveLPAE);
> UNSERIALIZE_SCALAR(directToStage2);
> UNSERIALIZE_SCALAR(stage2Req);
> UNSERIALIZE_SCALAR(bootUncacheability);
>
416c511
< Translation *translation, bool &delay, bool timing)
---
> Translation *translation, bool &delay, bool timing)
418,420c513,519
< if (!miscRegValid)
< updateMiscReg(tc);
< Addr vaddr = req->getVaddr();
---
> updateMiscReg(tc);
> Addr vaddr_tainted = req->getVaddr();
> Addr vaddr = 0;
> if (aarch64)
> vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
> else
> vaddr = vaddr_tainted;
429,430c528,533
< if (vaddr & flags & AlignmentMask) {
< return new DataAbort(vaddr, 0, is_write, ArmFault::AlignmentFault);
---
> if (vaddr & mask(flags & AlignmentMask)) {
> // LPAE is always disabled in SE mode
> return new DataAbort(vaddr_tainted,
> TlbEntry::DomainType::NoAccess, is_write,
> ArmFault::AlignmentFault, isStage2,
> ArmFault::VmsaTran);
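The change from "vaddr & flags & AlignmentMask" to "vaddr & mask(flags & AlignmentMask)" is semantic, not cosmetic: the alignment field in the request flags is now log2-encoded (a word access stores 2 rather than the literal mask 0x3), so the field must be expanded into a bit mask before testing the address. A worked sketch under that assumed encoding:

    #include <cassert>
    #include <cstdint>

    // gem5-style mask(n): the n least-significant bits set.
    uint64_t mask(int n) { return (1ULL << n) - 1; }

    int main()
    {
        const int AlignmentMask = 0x1f;  // assumed width of the field
        int flags = 2;                   // log2(4): a word access
        assert((0x1000 & mask(flags & AlignmentMask)) == 0); // aligned
        assert((0x1002 & mask(flags & AlignmentMask)) != 0); // misaligned
    }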
439c542
< return Fault(new GenericPageTableFault(vaddr));
---
> return Fault(new GenericPageTableFault(vaddr_tainted));
446c549
< TLB::trickBoxCheck(RequestPtr req, Mode mode, uint8_t domain, bool sNp)
---
> TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
452,453c555,556
< TLB::walkTrickBoxCheck(Addr pa, Addr va, Addr sz, bool is_exec,
< bool is_write, uint8_t domain, bool sNp)
---
> TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
> bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
458a562,904
> TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
> {
> Addr vaddr = req->getVaddr(); // 32-bit addresses need no purifying
> uint32_t flags = req->getFlags();
> bool is_fetch = (mode == Execute);
> bool is_write = (mode == Write);
> bool is_priv = isPriv && !(flags & UserMode);
>
> // Get the translation type from the actual table entry
> ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
> : ArmFault::VmsaTran;
>
> // If this is the second stage of translation and the request is for a
> // stage 1 page table walk then we need to check the HCR.PTW bit. This
> // allows us to generate a fault if the request targets an area marked
> // as a device or strongly ordered.
> if (isStage2 && req->isPTWalk() && hcr.ptw &&
> (te->mtype != TlbEntry::MemoryType::Normal)) {
> return new DataAbort(vaddr, te->domain, is_write,
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2, tranMethod);
> }
>
> // Generate an alignment fault for unaligned data accesses to device or
> // strongly ordered memory
> if (!is_fetch) {
> if (te->mtype != TlbEntry::MemoryType::Normal) {
> if (vaddr & mask(flags & AlignmentMask)) {
> alignFaults++;
> return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess, is_write,
> ArmFault::AlignmentFault, isStage2,
> tranMethod);
> }
> }
> }
>
> if (te->nonCacheable) {
> // Prevent prefetching from I/O devices.
> if (req->isPrefetch()) {
> // Here we can safely use the fault status for the short
> // desc. format in all cases
> return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
> isStage2, tranMethod);
> }
> }
>
> if (!te->longDescFormat) {
> switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
> case 0:
> domainFaults++;
> DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
> " domain: %#x write:%d\n", dacr,
> static_cast<uint8_t>(te->domain), is_write);
> if (is_fetch)
> return new PrefetchAbort(vaddr,
> ArmFault::DomainLL + te->lookupLevel,
> isStage2, tranMethod);
> else
> return new DataAbort(vaddr, te->domain, is_write,
> ArmFault::DomainLL + te->lookupLevel,
> isStage2, tranMethod);
> case 1:
> // Continue with permissions check
> break;
> case 2:
> panic("UNPRED domain\n");
> case 3:
> return NoFault;
> }
> }
>
> // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
> uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
> uint8_t hap = te->hap;
>
> if (sctlr.afe == 1 || te->longDescFormat)
> ap |= 1;
>
> bool abt;
> bool isWritable = true;
> // If this is a stage 2 access (e.g. for reading stage 1 page table
> // entries) then don't perform the AP permissions check; we still do the
> // HAP check below.
> if (isStage2) {
> abt = false;
> } else {
> switch (ap) {
> case 0:
> DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
> (int)sctlr.rs);
> if (!sctlr.xp) {
> switch ((int)sctlr.rs) {
> case 2:
> abt = is_write;
> break;
> case 1:
> abt = is_write || !is_priv;
> break;
> case 0:
> case 3:
> default:
> abt = true;
> break;
> }
> } else {
> abt = true;
> }
> break;
> case 1:
> abt = !is_priv;
> break;
> case 2:
> abt = !is_priv && is_write;
> isWritable = is_priv;
> break;
> case 3:
> abt = false;
> break;
> case 4:
> panic("UNPRED premissions\n");
> case 5:
> abt = !is_priv || is_write;
> isWritable = false;
> break;
> case 6:
> case 7:
> abt = is_write;
> isWritable = false;
> break;
> default:
> panic("Unknown permissions %#x\n", ap);
> }
> }
>
> bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
> bool xn = te->xn || (isWritable && sctlr.wxn) ||
> (ap == 3 && sctlr.uwxn && is_priv);
> if (is_fetch && (abt || xn ||
> (te->longDescFormat && te->pxn && !is_priv) ||
> (isSecure && te->ns && scr.sif))) {
> permsFaults++;
> DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
> "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
> ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
> return new PrefetchAbort(vaddr,
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2, tranMethod);
> } else if (abt || hapAbt) {
> permsFaults++;
> DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
> " write:%d\n", ap, is_priv, is_write);
> return new DataAbort(vaddr, te->domain, is_write,
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2 | !abt, tranMethod);
> }
> return NoFault;
> }
>
>
> Fault
> TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
> ThreadContext *tc)
> {
> assert(aarch64);
>
> Addr vaddr_tainted = req->getVaddr();
> Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
>
> uint32_t flags = req->getFlags();
> bool is_fetch = (mode == Execute);
> bool is_write = (mode == Write);
> bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
>
> updateMiscReg(tc, curTranType);
>
> // If this is the second stage of translation and the request is for a
> // stage 1 page table walk then we need to check the HCR.PTW bit. This
> // allows us to generate a fault if the request targets an area marked
> // as a device or strongly ordered.
> if (isStage2 && req->isPTWalk() && hcr.ptw &&
> (te->mtype != TlbEntry::MemoryType::Normal)) {
> return new DataAbort(vaddr_tainted, te->domain, is_write,
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2, ArmFault::LpaeTran);
> }
>
> // Generate an alignment fault for unaligned accesses to device or
> // strongly ordered memory
> if (!is_fetch) {
> if (te->mtype != TlbEntry::MemoryType::Normal) {
> if (vaddr & mask(flags & AlignmentMask)) {
> alignFaults++;
> return new DataAbort(vaddr_tainted,
> TlbEntry::DomainType::NoAccess, is_write,
> ArmFault::AlignmentFault, isStage2,
> ArmFault::LpaeTran);
> }
> }
> }
>
> if (te->nonCacheable) {
> // Prevent prefetching from I/O devices.
> if (req->isPrefetch()) {
> // Here we can safely use the fault status for the short
> // desc. format in all cases
> return new PrefetchAbort(vaddr_tainted,
> ArmFault::PrefetchUncacheable,
> isStage2, ArmFault::LpaeTran);
> }
> }
>
> uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
> bool grant = false;
>
> uint8_t xn = te->xn;
> uint8_t pxn = te->pxn;
> bool r = !is_write && !is_fetch;
> bool w = is_write;
> bool x = is_fetch;
> DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
> "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
>
> if (isStage2) {
> panic("Virtualization in AArch64 state is not supported yet");
> } else {
> switch (aarch64EL) {
> case EL0:
> {
> uint8_t perm = (ap << 2) | (xn << 1) | pxn;
> switch (perm) {
> case 0:
> case 1:
> case 8:
> case 9:
> grant = x;
> break;
> case 4:
> case 5:
> grant = r || w || (x && !sctlr.wxn);
> break;
> case 6:
> case 7:
> grant = r || w;
> break;
> case 12:
> case 13:
> grant = r || x;
> break;
> case 14:
> case 15:
> grant = r;
> break;
> default:
> grant = false;
> }
> }
> break;
> case EL1:
> {
> uint8_t perm = (ap << 2) | (xn << 1) | pxn;
> switch (perm) {
> case 0:
> case 2:
> grant = r || w || (x && !sctlr.wxn);
> break;
> case 1:
> case 3:
> case 4:
> case 5:
> case 6:
> case 7:
> // regions that are writeable at EL0 should not be
> // executable at EL1
> grant = r || w;
> break;
> case 8:
> case 10:
> case 12:
> case 14:
> grant = r || x;
> break;
> case 9:
> case 11:
> case 13:
> case 15:
> grant = r;
> break;
> default:
> grant = false;
> }
> }
> break;
> case EL2:
> case EL3:
> {
> uint8_t perm = (ap & 0x2) | xn;
> switch (perm) {
> case 0:
> grant = r || w || (x && !sctlr.wxn);
> break;
> case 1:
> grant = r || w;
> break;
> case 2:
> grant = r || x;
> break;
> case 3:
> grant = r;
> break;
> default:
> grant = false;
> }
> }
> break;
> }
> }
>
> if (!grant) {
> if (is_fetch) {
> permsFaults++;
> DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
> "AP:%d priv:%d write:%d ns:%d sif:%d "
> "sctlr.afe: %d\n",
> ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
> // Use PC value instead of vaddr because vaddr might be aligned to
> // cache line and should not be the address reported in FAR
> return new PrefetchAbort(req->getPC(),
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2, ArmFault::LpaeTran);
> } else {
> permsFaults++;
> DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
> "priv:%d write:%d\n", ap, is_priv, is_write);
> return new DataAbort(vaddr_tainted, te->domain, is_write,
> ArmFault::PermissionLL + te->lookupLevel,
> isStage2, ArmFault::LpaeTran);
> }
> }
>
> return NoFault;
> }
>
> Fault
460c906,907
< Translation *translation, bool &delay, bool timing, bool functional)
---
> Translation *translation, bool &delay, bool timing,
> TLB::ArmTranslationType tranType, bool functional)
465,468c912
< if (!miscRegValid) {
< updateMiscReg(tc);
< DPRINTF(TLBVerbose, "TLB variables changed!\n");
< }
---
> updateMiscReg(tc, tranType);
470c914,919
< Addr vaddr = req->getVaddr();
---
> Addr vaddr_tainted = req->getVaddr();
> Addr vaddr = 0;
> if (aarch64)
> vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
> else
> vaddr = vaddr_tainted;
473,475c922,926
< bool is_fetch = (mode == Execute);
< bool is_write = (mode == Write);
< bool is_priv = isPriv && !(flags & UserMode);
---
> bool is_fetch = (mode == Execute);
> bool is_write = (mode == Write);
> bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
> ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
> : ArmFault::VmsaTran;
477,479c928
< req->setAsid(contextId.asid);
< if (is_priv)
< req->setFlags(Request::PRIVILEGED);
---
> req->setAsid(asid);
481c930,931
< req->taskId(tc->getCpuPtr()->taskId());
---
> DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
> isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
483,484c933,941
< DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d\n",
< isPriv, flags & UserMode);
---
> DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
> "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
> scr, sctlr, flags, tranType);
>
> // Generate an alignment fault for unaligned PC
> if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
> return new PCAlignmentFault(req->getPC());
> }
>
488a946
> // @todo: check implications of security extensions
501c959
< if (vaddr & flags & AlignmentMask) {
---
> if (vaddr & mask(flags & AlignmentMask)) {
503c961,964
< return new DataAbort(vaddr, 0, is_write, ArmFault::AlignmentFault);
---
> return new DataAbort(vaddr_tainted,
> TlbEntry::DomainType::NoAccess, is_write,
> ArmFault::AlignmentFault, isStage2,
> tranMethod);
508c969,970
< Fault fault;
---
> // MMU off (stage 1: SCTLR.M clear; stage 2: HCR.VM clear): flat mapping
> if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
510d971
< if (!sctlr.m) {
512c973,979
< if (sctlr.tre == 0) {
---
> // When the MMU is off the security attribute corresponds to the
> // security state of the processor
> if (isSecure)
> req->setFlags(Request::SECURE);
>
> // @todo: double check this (ARM ARM issue C B3.2.1)
> if (long_desc_format || sctlr.tre == 0) {
516c983
< req->setFlags(Request::UNCACHEABLE);
---
> req->setFlags(Request::UNCACHEABLE);
521,522c988,1005
< tableWalker->memAttrs(tc, temp_te, sctlr, 0, 1);
< temp_te.shareable = true;
---
> temp_te.ns = !isSecure;
> if (isStage2 || hcr.dc == 0 || isSecure ||
> (isHyp && !(tranType & S1CTran))) {
>
> temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
> : TlbEntry::MemoryType::StronglyOrdered;
> temp_te.innerAttrs = 0x0;
> temp_te.outerAttrs = 0x0;
> temp_te.shareable = true;
> temp_te.outerShareable = true;
> } else {
> temp_te.mtype = TlbEntry::MemoryType::Normal;
> temp_te.innerAttrs = 0x3;
> temp_te.outerAttrs = 0x3;
> temp_te.shareable = false;
> temp_te.outerShareable = false;
> }
> temp_te.setAttributes(long_desc_format);
524,525c1007,1009
< %d, innerAttrs: %d, outerAttrs: %d\n", temp_te.shareable,
< temp_te.innerAttrs, temp_te.outerAttrs);
---
> %d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
> temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
> isStage2);
528c1012
< return trickBoxCheck(req, mode, 0, false);
---
> return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
531c1015,1016
< DPRINTF(TLBVerbose, "Translating vaddr=%#x context=%d\n", vaddr, contextId);
---
> DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
> isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
534,540c1019,1037
< TlbEntry *te = lookup(vaddr, contextId);
< if (te == NULL) {
< if (req->isPrefetch()){
< //if the request is a prefetch don't attempt to fill the TLB
< //or go any further with the memory access
< prefetchFaults++;
< return new PrefetchAbort(vaddr, ArmFault::PrefetchTLBMiss);
---
> TlbEntry *te = NULL;
> TlbEntry mergeTe;
> Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
> functional, &mergeTe);
> // only proceed if we have a valid table entry
> if ((te == NULL) && (fault == NoFault)) delay = true;
>
> // If we have the table entry transfer some of the attributes to the
> // request that triggered the translation
> if (te != NULL) {
> // Set memory attributes
> DPRINTF(TLBVerbose,
> "Setting memory attributes: shareable: %d, innerAttrs: %d, \
> outerAttrs: %d, mtype: %d, isStage2: %d\n",
> te->shareable, te->innerAttrs, te->outerAttrs,
> static_cast<uint8_t>(te->mtype), isStage2);
> setAttr(te->attributes);
> if (te->nonCacheable) {
> req->setFlags(Request::UNCACHEABLE);
543,548c1040,1043
< if (is_fetch)
< instMisses++;
< else if (is_write)
< writeMisses++;
< else
< readMisses++;
---
> if (!bootUncacheability &&
> ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
> req->setFlags(Request::UNCACHEABLE);
> }
550,559c1045,1047
< // start translation table walk, pass variables rather than
< // re-retreaving in table walker for speed
< DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d)\n",
< vaddr, contextId);
< fault = tableWalker->walk(req, tc, contextId, mode, translation,
< timing, functional);
< if (timing && fault == NoFault) {
< delay = true;
< // for timing mode, return and wait for table walk
< return fault;
---
> req->setPaddr(te->pAddr(vaddr));
> if (isSecure && !te->ns) {
> req->setFlags(Request::SECURE);
561,562c1049,1058
< if (fault)
< return fault;
---
> if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
> (te->mtype != TlbEntry::MemoryType::Normal)) {
> // Unaligned accesses to Device memory should always cause an
> // abort regardless of sctlr.a
> alignFaults++;
> return new DataAbort(vaddr_tainted,
> TlbEntry::DomainType::NoAccess, is_write,
> ArmFault::AlignmentFault, isStage2,
> tranMethod);
> }
564,588c1060,1062
< te = lookup(vaddr, contextId);
< if (!te)
< printTlb();
< assert(te);
< } else {
< if (is_fetch)
< instHits++;
< else if (is_write)
< writeHits++;
< else
< readHits++;
< }
<
< // Set memory attributes
< DPRINTF(TLBVerbose,
< "Setting memory attributes: shareable: %d, innerAttrs: %d, \
< outerAttrs: %d\n",
< te->shareable, te->innerAttrs, te->outerAttrs);
< setAttr(te->attributes);
< if (te->nonCacheable) {
< req->setFlags(Request::UNCACHEABLE);
<
< // Prevent prefetching from I/O devices.
< if (req->isPrefetch()) {
< return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable);
---
> // Check for a trickbox generated address fault
> if (fault == NoFault) {
> fault = trickBoxCheck(req, mode, te->domain);
592,648c1066,1070
< if (!bootUncacheability &&
< ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr))
< req->setFlags(Request::UNCACHEABLE);
<
< switch ( (dacr >> (te->domain * 2)) & 0x3) {
< case 0:
< domainFaults++;
< DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x domain: %#x"
< " write:%d sNp:%d\n", dacr, te->domain, is_write, te->sNp);
< if (is_fetch)
< return new PrefetchAbort(vaddr,
< (te->sNp ? ArmFault::Domain0 : ArmFault::Domain1));
< else
< return new DataAbort(vaddr, te->domain, is_write,
< (te->sNp ? ArmFault::Domain0 : ArmFault::Domain1));
< case 1:
< // Continue with permissions check
< break;
< case 2:
< panic("UNPRED domain\n");
< case 3:
< req->setPaddr(te->pAddr(vaddr));
< fault = trickBoxCheck(req, mode, te->domain, te->sNp);
< if (fault)
< return fault;
< return NoFault;
< }
<
< uint8_t ap = te->ap;
<
< if (sctlr.afe == 1)
< ap |= 1;
<
< bool abt;
<
< /* if (!sctlr.xp)
< ap &= 0x3;
< */
< switch (ap) {
< case 0:
< DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n", (int)sctlr.rs);
< if (!sctlr.xp) {
< switch ((int)sctlr.rs) {
< case 2:
< abt = is_write;
< break;
< case 1:
< abt = is_write || !is_priv;
< break;
< case 0:
< case 3:
< default:
< abt = true;
< break;
< }
< } else {
< abt = true;
---
> // Generate Illegal Inst Set State fault if IL bit is set in CPSR
> if (fault == NoFault) {
> CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
> if (aarch64 && is_fetch && cpsr.il == 1) {
> return new IllegalInstSetStateFault();
650,670d1071
< break;
< case 1:
< abt = !is_priv;
< break;
< case 2:
< abt = !is_priv && is_write;
< break;
< case 3:
< abt = false;
< break;
< case 4:
< panic("UNPRED premissions\n");
< case 5:
< abt = !is_priv || is_write;
< break;
< case 6:
< case 7:
< abt = is_write;
< break;
< default:
< panic("Unknown permissions\n");
672,686d1072
< if ((is_fetch) && (abt || te->xn)) {
< permsFaults++;
< DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d priv:%d"
< " write:%d sNp:%d\n", ap, is_priv, is_write, te->sNp);
< return new PrefetchAbort(vaddr,
< (te->sNp ? ArmFault::Permission0 :
< ArmFault::Permission1));
< } else if (abt) {
< permsFaults++;
< DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
< " write:%d sNp:%d\n", ap, is_priv, is_write, te->sNp);
< return new DataAbort(vaddr, te->domain, is_write,
< (te->sNp ? ArmFault::Permission0 :
< ArmFault::Permission1));
< }
688,694c1074
< req->setPaddr(te->pAddr(vaddr));
< // Check for a trickbox generated address fault
< fault = trickBoxCheck(req, mode, te->domain, te->sNp);
< if (fault)
< return fault;
<
< return NoFault;
---
> return fault;
698c1078,1079
< TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
---
> TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
> TLB::ArmTranslationType tranType)
699a1081,1087
> updateMiscReg(tc, tranType);
>
> if (directToStage2) {
> assert(stage2Tlb);
> return stage2Tlb->translateAtomic(req, tc, mode, tranType);
> }
>
703c1091
< fault = translateFs(req, tc, mode, NULL, delay, false);
---
> fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
711c1099,1100
< TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
---
> TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
> TLB::ArmTranslationType tranType)
712a1102,1108
> updateMiscReg(tc, tranType);
>
> if (directToStage2) {
> assert(stage2Tlb);
> return stage2Tlb->translateFunctional(req, tc, mode, tranType);
> }
>
716,717c1112,1113
< fault = translateFs(req, tc, mode, NULL, delay, false, true);
< else
---
> fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
> else
725c1121
< Translation *translation, Mode mode)
---
> Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
726a1123,1129
> updateMiscReg(tc, tranType);
>
> if (directToStage2) {
> assert(stage2Tlb);
> return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
> }
>
727a1131,1139
>
> return translateComplete(req, tc, translation, mode, tranType, isStage2);
> }
>
> Fault
> TLB::translateComplete(RequestPtr req, ThreadContext *tc,
> Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
> bool callFromS2)
> {
731c1143
< fault = translateFs(req, tc, mode, translation, delay, true);
---
> fault = translateFs(req, tc, mode, translation, delay, true, tranType);
736,739c1148,1159
< if (!delay)
< translation->finish(fault, req, tc, mode);
< else
< translation->markDelayed();
---
> // If we have a translation and we're not in the middle of doing a stage
> // 2 translation, tell the translation that we've either finished or it's
> // going to take a while. By not doing this when we're in the middle of a
> // stage 2 translation we avoid marking the translation as delayed twice,
> // once when the translation starts and again when the stage 1 translation
> // completes.
> if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
> if (!delay)
> translation->finish(fault, req, tc, mode);
> else
> translation->markDelayed();
> }
748a1169,1173
> DmaPort&
> TLB::getWalkerPort()
> {
> return tableWalker->getWalkerPort();
> }
749a1175,1183
> void
> TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
> {
> // Check whether the regs have changed or the translation mode differs.
> // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
> // one type of translation anyway.
> if (miscRegValid && ((tranType == curTranType) || isStage2)) {
> return;
> }
750a1185,1392
> DPRINTF(TLBVerbose, "TLB variables changed!\n");
> CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
> // Dependencies: SCR/SCR_EL3, CPSR
> isSecure = inSecureState(tc);
> isSecure &= (tranType & HypMode) == 0;
> isSecure &= (tranType & S1S2NsTran) == 0;
> aarch64 = !cpsr.width;
> if (aarch64) { // AArch64
> aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
> switch (aarch64EL) {
> case EL0:
> case EL1:
> {
> sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
> ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
> uint64_t ttbr_asid = ttbcr.a1 ?
> tc->readMiscReg(MISCREG_TTBR1_EL1) :
> tc->readMiscReg(MISCREG_TTBR0_EL1);
> asid = bits(ttbr_asid,
> (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
> }
> break;
> case EL2:
> sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
> ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
> asid = -1;
> break;
> case EL3:
> sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
> ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
> asid = -1;
> break;
> }
> scr = tc->readMiscReg(MISCREG_SCR_EL3);
> isPriv = aarch64EL != EL0;
> // @todo: modify this behaviour to support Virtualization in
> // AArch64
> vmid = 0;
> isHyp = false;
> directToStage2 = false;
> stage2Req = false;
> } else { // AArch32
> sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
> !isSecure));
> ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
> !isSecure));
> scr = tc->readMiscReg(MISCREG_SCR);
> isPriv = cpsr.mode != MODE_USER;
> if (haveLPAE && ttbcr.eae) {
> // Long-descriptor translation table format in use
> uint64_t ttbr_asid = tc->readMiscReg(
> flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
> : MISCREG_TTBR0,
> tc, !isSecure));
> asid = bits(ttbr_asid, 55, 48);
> } else {
> // Short-descriptor translation table format in use
> CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
> MISCREG_CONTEXTIDR, tc, !isSecure));
> asid = context_id.asid;
> }
> prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
> !isSecure));
> nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
> !isSecure));
> dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
> !isSecure));
> hcr = tc->readMiscReg(MISCREG_HCR);
>
> if (haveVirtualization) {
> vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
> isHyp = cpsr.mode == MODE_HYP;
> isHyp |= tranType & HypMode;
> isHyp &= (tranType & S1S2NsTran) == 0;
> isHyp &= (tranType & S1CTran) == 0;
> if (isHyp) {
> sctlr = tc->readMiscReg(MISCREG_HSCTLR);
> }
> // Work out if we should skip the first stage of translation and go
> // directly to stage 2. This value is cached so we don't have to
> // compute it for every translation.
> stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
> !(tranType & S1CTran);
> directToStage2 = stage2Req && !sctlr.m;
> } else {
> vmid = 0;
> stage2Req = false;
> isHyp = false;
> directToStage2 = false;
> }
> }
> miscRegValid = true;
> curTranType = tranType;
> }
>
> Fault
> TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
> Translation *translation, bool timing, bool functional,
> bool is_secure, TLB::ArmTranslationType tranType)
> {
> bool is_fetch = (mode == Execute);
> bool is_write = (mode == Write);
>
> Addr vaddr_tainted = req->getVaddr();
> Addr vaddr = 0;
> ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
> if (aarch64) {
> vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
> } else {
> vaddr = vaddr_tainted;
> }
> *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
> if (*te == NULL) {
> if (req->isPrefetch()) {
> // if the request is a prefetch don't attempt to fill the TLB or go
> // any further with the memory access (here we can safely use the
> // fault status for the short desc. format in all cases)
> prefetchFaults++;
> return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
> }
>
> if (is_fetch)
> instMisses++;
> else if (is_write)
> writeMisses++;
> else
> readMisses++;
>
> // start translation table walk, pass variables rather than
> // re-retrieving them in the table walker, for speed
> DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
> vaddr_tainted, asid, vmid);
> Fault fault;
> fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
> translation, timing, functional, is_secure,
> tranType);
> // for timing mode, return and wait for the table walk
> if (timing || fault != NoFault) {
> return fault;
> }
>
> *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
> if (!*te)
> printTlb();
> assert(*te);
> } else {
> if (is_fetch)
> instHits++;
> else if (is_write)
> writeHits++;
> else
> readHits++;
> }
> return NoFault;
> }
>
> Fault
> TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
> Translation *translation, bool timing, bool functional,
> TlbEntry *mergeTe)
> {
> Fault fault;
> TlbEntry *s1Te = NULL;
>
> Addr vaddr_tainted = req->getVaddr();
>
> // Get the stage 1 table entry
> fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
> isSecure, curTranType);
> // only proceed if we have a valid table entry
> if ((s1Te != NULL) && (fault == NoFault)) {
> // Check stage 1 permissions before checking stage 2
> if (aarch64)
> fault = checkPermissions64(s1Te, req, mode, tc);
> else
> fault = checkPermissions(s1Te, req, mode);
> if (stage2Req && (fault == NoFault)) {
> Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
> req, translation, mode, timing, functional, curTranType);
> fault = s2Lookup->getTe(tc, mergeTe);
> if (s2Lookup->isComplete()) {
> *te = mergeTe;
> // We've finished with the lookup so delete it
> delete s2Lookup;
> } else {
> // The lookup hasn't completed, so we can't delete it now. We
> // get around this by asking the object to delete itself when the
> // translation is complete.
> s2Lookup->setSelfDelete();
> }
> } else {
> // This case deals with an S1 hit (or bypass), followed by
> // an S2 hit-but-perms issue
> if (isStage2) {
> DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
> vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
> if (fault != NoFault) {
> ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
> armFault->annotate(ArmFault::S1PTW, false);
> armFault->annotate(ArmFault::OVA, vaddr_tainted);
> }
> }
> *te = s1Te;
> }
> }
> return fault;
> }
>
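updateMiscReg caches two routing decisions so they need not be recomputed per translation: stage2Req (a stage 2 walk must follow stage 1) and directToStage2 (stage 1 is a no-op, so requests go straight to the stage 2 TLB). A condensed restatement of the two predicates, with plain bools standing in for the miscreg fields (assumed names):

    struct State {
        bool hcr_vm;    // HCR.VM: virtualization MMU enable
        bool sctlr_m;   // SCTLR.M: guest stage 1 MMU enable
        bool isStage2, isHyp, isSecure, s1CTran;
    };

    // Stage 2 only follows non-secure, non-hyp stage 1 translations
    // while HCR.VM is set, and never for explicit stage-1-only ops.
    bool stage2Req(const State &s)
    {
        return s.hcr_vm && !s.isStage2 && !s.isHyp && !s.isSecure &&
               !s.s1CTran;
    }

    // With the guest's stage 1 MMU off, stage 1 is an identity map, so
    // the lookup can be handed directly to the stage 2 TLB.
    bool directToStage2(const State &s)
    {
        return stage2Req(s) && !s.sctlr_m;
    }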