old ( 10024:fc10e1f9f124 ) new ( 10037:5cac77888310 )
1/*
2 * Copyright (c) 2010-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated

--- 33 unchanged lines hidden ---

44
45#include <string>
46#include <vector>
47
48#include "arch/arm/faults.hh"
49#include "arch/arm/pagetable.hh"
50#include "arch/arm/system.hh"
51#include "arch/arm/table_walker.hh"
52#include "arch/arm/stage2_lookup.hh"
53#include "arch/arm/stage2_mmu.hh"
54#include "arch/arm/tlb.hh"
55#include "arch/arm/utility.hh"
56#include "base/inifile.hh"
57#include "base/str.hh"
58#include "base/trace.hh"
59#include "cpu/base.hh"
60#include "cpu/thread_context.hh"
61#include "debug/Checkpoint.hh"
62#include "debug/TLB.hh"
63#include "debug/TLBVerbose.hh"
64#include "mem/page_table.hh"
65#include "params/ArmTLB.hh"
66#include "sim/full_system.hh"
67#include "sim/process.hh"
68
69using namespace std;
70using namespace ArmISA;
71
72TLB::TLB(const ArmTLBParams *p)
73 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
74 isStage2(p->is_stage2), tableWalker(p->walker), stage2Tlb(NULL),
75 stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
76 miscRegValid(false), curTranType(NormalTran)
77{
78 tableWalker->setTlb(this);
79
80 // Cache system-level properties
81 haveLPAE = tableWalker->haveLPAE();
82 haveVirtualization = tableWalker->haveVirtualization();
83 haveLargeAsid64 = tableWalker->haveLargeAsid64();
84}
85
86TLB::~TLB()
87{
88 delete[] table;
89}
90
91void
92TLB::init()
93{
94 if (stage2Mmu && !isStage2)
95 stage2Tlb = stage2Mmu->stage2Tlb();
96}
97
98void
99TLB::setMMU(Stage2MMU *m)
100{
101 stage2Mmu = m;
102 tableWalker->setMMU(m);
103}
104
105bool
106TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
107{
108 updateMiscReg(tc);
109
110 if (directToStage2) {
111 assert(stage2Tlb);
112 return stage2Tlb->translateFunctional(tc, va, pa);
113 }
114
115 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
116 aarch64 ? aarch64EL : EL1);
117 if (!e)
118 return false;
119 pa = e->pAddr(va);
120 return true;
121}
122
123Fault
124TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
125{
126 return NoFault;
127}
128
129TlbEntry*
130TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
131 bool functional, bool ignore_asn, uint8_t target_el)
132{
133
134 TlbEntry *retval = NULL;
135
136 // Maintaining LRU array
137 int x = 0;
138 while (retval == NULL && x < size) {
139 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
140 target_el)) ||
141 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
142 // We only move the hit entry ahead when the position is higher
143 // than rangeMRU
144 if (x > rangeMRU && !functional) {
145 TlbEntry tmp_entry = table[x];
146 for(int i = x; i > 0; i--)
147 table[i] = table[i - 1];
148 table[0] = tmp_entry;
149 retval = &table[0];
150 } else {
151 retval = &table[x];
152 }
153 break;
154 }
155 ++x;
156 }
157
158 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
159 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
160 "el: %d\n",
161 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
162 retval ? retval->pfn : 0, retval ? retval->size : 0,
163 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
164 retval ? retval->ns : 0, retval ? retval->nstid : 0,
165 retval ? retval->global : 0, retval ? retval->asid : 0,
166 retval ? retval->el : 0);
167
168 return retval;
169}
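// Worked example of the replacement policy above (illustration only): a hit
// found beyond rangeMRU is promoted to the front of the array and the
// intervening entries slide down one slot, approximating LRU ordering. With
// rangeMRU == 1 and table == [A, B, C, D], a hit on D (index 3) reorders the
// table to [D, A, B, C], while a hit on B (index 1) leaves the order
// untouched because its index is not greater than rangeMRU.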
170
171// insert a new TLB entry
172void
173TLB::insert(Addr addr, TlbEntry &entry)
174{
175 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
176 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
177 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
178 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
179 entry.global, entry.valid, entry.nonCacheable, entry.xn,
180 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
181 entry.isHyp);
182
183 if (table[size - 1].valid)
184 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
185 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
186 table[size-1].vpn << table[size-1].N, table[size-1].asid,
187 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
188 table[size-1].size, table[size-1].ap, table[size-1].ns,
189 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
190 table[size-1].el);
191
192 // Insert into the MRU position, evicting the LRU entry
193
194 for (int i = size - 1; i > 0; --i)
195 table[i] = table[i-1];
196 table[0] = entry;
197
198 inserts++;
199}
200
201void
202TLB::printTlb() const
203{
204 int x = 0;
205 TlbEntry *te;
206 DPRINTF(TLB, "Current TLB contents:\n");
207 while (x < size) {
208 te = &table[x];
209 if (te->valid)
210 DPRINTF(TLB, " * %s\n", te->print());
211 ++x;
212 }
213}
214
215void
216TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
217{
218 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
219 (secure_lookup ? "secure" : "non-secure"));
220 int x = 0;
221 TlbEntry *te;
222 while (x < size) {
223 te = &table[x];
224 if (te->valid && secure_lookup == !te->nstid &&
225 (te->vmid == vmid || secure_lookup) &&
226 checkELMatch(target_el, te->el, ignore_el)) {
227
228 DPRINTF(TLB, " - %s\n", te->print());
229 te->valid = false;
230 flushedEntries++;
231 }
232 ++x;
233 }
234
235 flushTlb++;
236
237 // If there's a second stage TLB (and we're not it) then flush it as well
238 // if we're currently in hyp mode
239 if (!isStage2 && isHyp) {
240 stage2Tlb->flushAllSecurity(secure_lookup, true);
241 }
242}
243
244void
245TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
246{
247 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
248 (hyp ? "hyp" : "non-hyp"));
249 int x = 0;
250 TlbEntry *te;
251 while (x < size) {
252 te = &table[x];
253 if (te->valid && te->nstid && te->isHyp == hyp &&
254 checkELMatch(target_el, te->el, ignore_el)) {
255
256 DPRINTF(TLB, " - %s\n", te->print());
257 flushedEntries++;
258 te->valid = false;
259 }
260 ++x;
261 }
262
263 flushTlb++;
264
265 // If there's a second stage TLB (and we're not it) then flush it as well
266 if (!isStage2 && !hyp) {
267 stage2Tlb->flushAllNs(false, true);
268 }
269}
270
271void
272TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
273{
274 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
275 "(%s lookup)\n", mva, asn, (secure_lookup ?
276 "secure" : "non-secure"));
277 _flushMva(mva, asn, secure_lookup, false, false, target_el);
278 flushTlbMvaAsid++;
279}
280
281void
282TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
283{
284 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
285 (secure_lookup ? "secure" : "non-secure"));
286
287 int x = 0;
288 TlbEntry *te;
289
290 while (x < size) {
291 te = &table[x];
292 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
293 (te->vmid == vmid || secure_lookup) &&
294 checkELMatch(target_el, te->el, false)) {
295
296 te->valid = false;
297 DPRINTF(TLB, " - %s\n", te->print());
298 flushedEntries++;
299 }
300 ++x;
301 }
302 flushTlbAsid++;
303}
304
305void
306TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
307{
308 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
309 (secure_lookup ? "secure" : "non-secure"));
310 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
311 flushTlbMva++;
312}
313
314void
315TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
316 bool ignore_asn, uint8_t target_el)
317{
318 TlbEntry *te;
319 // D5.7.2: Sign-extend address to 64 bits
320 mva = sext<56>(mva);
321 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
322 target_el);
323 while (te != NULL) {
324 if (secure_lookup == !te->nstid) {
325 DPRINTF(TLB, " - %s\n", te->print());
326 te->valid = false;
327 flushedEntries++;
328 }
329 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
330 target_el);
331 }
332}
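// Worked example of the sign extension above (illustration only): sext<56>()
// replicates bit 55 of the MVA into bits [63:56] so that flush addresses
// compare consistently with stored entries, e.g.
//   sext<56>(0x0080000000000000) -> 0xff80000000000000   (bit 55 set)
//   sext<56>(0x0000ffff12345000) -> 0x0000ffff12345000   (bit 55 clear)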
333
334bool
335TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
336{
337 bool elMatch = true;
338 if (!ignore_el) {
339 if (target_el == 2 || target_el == 3) {
340 elMatch = (tentry_el == target_el);
341 } else {
342 elMatch = (tentry_el == 0) || (tentry_el == 1);
343 }
344 }
345 return elMatch;
346}
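// Worked example of the EL matching rule above (illustration only): EL0 and
// EL1 entries are treated as a single group, while EL2 and EL3 must match
// the target exactly unless ignore_el is set, e.g.
//   checkELMatch(1, 0, false) -> true    (an EL1-targeted op also hits EL0 entries)
//   checkELMatch(2, 1, false) -> false   (an EL2-targeted op skips EL1 entries)
//   checkELMatch(3, 3, false) -> true
//   checkELMatch(2, 1, true)  -> true    (ignore_el bypasses the check)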
347
348void
349TLB::drainResume()
350{
351 // We might have unserialized something or switched CPUs, so make
352 // sure to re-read the misc regs.
353 miscRegValid = false;
354}
355
356void
357TLB::serialize(ostream &os)
358{
359 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
360
361 SERIALIZE_SCALAR(_attr);
362 SERIALIZE_SCALAR(haveLPAE);
363 SERIALIZE_SCALAR(directToStage2);
364 SERIALIZE_SCALAR(stage2Req);
365 SERIALIZE_SCALAR(bootUncacheability);
366
367 int num_entries = size;
368 SERIALIZE_SCALAR(num_entries);
369 for(int i = 0; i < size; i++){
370 nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
371 table[i].serialize(os);
372 }
373}
374
375void
376TLB::unserialize(Checkpoint *cp, const string &section)
377{
378 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
379
380 UNSERIALIZE_SCALAR(_attr);
381 UNSERIALIZE_SCALAR(haveLPAE);
382 UNSERIALIZE_SCALAR(directToStage2);
383 UNSERIALIZE_SCALAR(stage2Req);
384 UNSERIALIZE_SCALAR(bootUncacheability);
385
386 int num_entries;
387 UNSERIALIZE_SCALAR(num_entries);
388 for(int i = 0; i < min(size, num_entries); i++){
389 table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
390 }
391}
392
393void

--- 109 unchanged lines hidden ---

503 writeAccesses = writeHits + writeMisses;
504 hits = readHits + writeHits + instHits;
505 misses = readMisses + writeMisses + instMisses;
506 accesses = readAccesses + writeAccesses + instAccesses;
507}
508
509Fault
510TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
511 Translation *translation, bool &delay, bool timing)
512{
513 updateMiscReg(tc);
514 Addr vaddr_tainted = req->getVaddr();
515 Addr vaddr = 0;
516 if (aarch64)
517 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
518 else
519 vaddr = vaddr_tainted;
520 uint32_t flags = req->getFlags();
521
522 bool is_fetch = (mode == Execute);
523 bool is_write = (mode == Write);
524
525 if (!is_fetch) {
526 assert(flags & MustBeOne);
527 if (sctlr.a || !(flags & AllowUnaligned)) {
528 if (vaddr & mask(flags & AlignmentMask)) {
529 // LPAE is always disabled in SE mode
530 return new DataAbort(vaddr_tainted,
531 TlbEntry::DomainType::NoAccess, is_write,
532 ArmFault::AlignmentFault, isStage2,
533 ArmFault::VmsaTran);
534 }
535 }
536 }
537
538 Addr paddr;
539 Process *p = tc->getProcessPtr();
540
541 if (!p->pTable->translate(vaddr, paddr))
542 return Fault(new GenericPageTableFault(vaddr_tainted));
543 req->setPaddr(paddr);
544
545 return NoFault;
546}
547
548Fault
549TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
550{
551 return NoFault;
552}
553
554Fault
555TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
556 bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
557{
558 return NoFault;
559}
560
561Fault
562TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
563{
564 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
565 uint32_t flags = req->getFlags();
566 bool is_fetch = (mode == Execute);
567 bool is_write = (mode == Write);
568 bool is_priv = isPriv && !(flags & UserMode);
569
570 // Get the translation type from the actual table entry
571 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
572 : ArmFault::VmsaTran;
573
574 // If this is the second stage of translation and the request is for a
575 // stage 1 page table walk then we need to check the HCR.PTW bit. This
576 // allows us to generate a fault if the request targets an area marked
577 // as a device or strongly ordered.
578 if (isStage2 && req->isPTWalk() && hcr.ptw &&
579 (te->mtype != TlbEntry::MemoryType::Normal)) {
580 return new DataAbort(vaddr, te->domain, is_write,
581 ArmFault::PermissionLL + te->lookupLevel,
582 isStage2, tranMethod);
583 }
584
585 // Generate an alignment fault for unaligned data accesses to device or
586 // strongly ordered memory
587 if (!is_fetch) {
588 if (te->mtype != TlbEntry::MemoryType::Normal) {
589 if (vaddr & mask(flags & AlignmentMask)) {
590 alignFaults++;
591 return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess, is_write,
592 ArmFault::AlignmentFault, isStage2,
593 tranMethod);
594 }
595 }
596 }
597
598 if (te->nonCacheable) {
599 // Prevent prefetching from I/O devices.
600 if (req->isPrefetch()) {
601 // Here we can safely use the fault status for the short
602 // desc. format in all cases
603 return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
604 isStage2, tranMethod);
605 }
606 }
607
608 if (!te->longDescFormat) {
609 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
610 case 0:
611 domainFaults++;
612 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
613 " domain: %#x write:%d\n", dacr,
614 static_cast<uint8_t>(te->domain), is_write);
615 if (is_fetch)
616 return new PrefetchAbort(vaddr,
617 ArmFault::DomainLL + te->lookupLevel,
618 isStage2, tranMethod);
619 else
620 return new DataAbort(vaddr, te->domain, is_write,
621 ArmFault::DomainLL + te->lookupLevel,
622 isStage2, tranMethod);
623 case 1:
624 // Continue with permissions check
625 break;
626 case 2:
627 panic("UNPRED domain\n");
628 case 3:
629 return NoFault;
630 }
631 }
632
633 // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
634 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
635 uint8_t hap = te->hap;
636
637 if (sctlr.afe == 1 || te->longDescFormat)
638 ap |= 1;
639
640 bool abt;
641 bool isWritable = true;
642 // If this is a stage 2 access (e.g. for reading stage 1 page table entries)
643 // then don't perform the AP permissions check; we still do the HAP check
644 // below.
645 if (isStage2) {
646 abt = false;
647 } else {
648 switch (ap) {
649 case 0:
650 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
651 (int)sctlr.rs);
652 if (!sctlr.xp) {
653 switch ((int)sctlr.rs) {
654 case 2:
655 abt = is_write;
656 break;
657 case 1:
658 abt = is_write || !is_priv;
659 break;
660 case 0:
661 case 3:
662 default:
663 abt = true;
664 break;
665 }
666 } else {
667 abt = true;
668 }
669 break;
670 case 1:
671 abt = !is_priv;
672 break;
673 case 2:
674 abt = !is_priv && is_write;
675 isWritable = is_priv;
676 break;
677 case 3:
678 abt = false;
679 break;
680 case 4:
681 panic("UNPRED permissions\n");
682 case 5:
683 abt = !is_priv || is_write;
684 isWritable = false;
685 break;
686 case 6:
687 case 7:
688 abt = is_write;
689 isWritable = false;
690 break;
691 default:
692 panic("Unknown permissions %#x\n", ap);
693 }
694 }
695
696 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
697 bool xn = te->xn || (isWritable && sctlr.wxn) ||
698 (ap == 3 && sctlr.uwxn && is_priv);
699 if (is_fetch && (abt || xn ||
700 (te->longDescFormat && te->pxn && !is_priv) ||
701 (isSecure && te->ns && scr.sif))) {
702 permsFaults++;
703 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
704 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
705 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
706 return new PrefetchAbort(vaddr,
707 ArmFault::PermissionLL + te->lookupLevel,
708 isStage2, tranMethod);
709 } else if (abt | hapAbt) {
710 permsFaults++;
711 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
712 " write:%d\n", ap, is_priv, is_write);
713 return new DataAbort(vaddr, te->domain, is_write,
714 ArmFault::PermissionLL + te->lookupLevel,
715 isStage2 | !abt, tranMethod);
716 }
717 return NoFault;
718}
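// Worked example of the AP decode above (illustration only): with the
// short-descriptor format and SCTLR.AFE == 0, ap is interpreted as AP[2:0].
// An entry with ap == 2 (AP = 0b010) grants privileged read/write but only
// unprivileged read: a user-mode write sets abt (= !is_priv && is_write) and
// raises a permission DataAbort, while privileged accesses pass. With
// SCTLR.AFE == 1 or the long-descriptor format, bit 0 is forced to 1,
// collapsing the table to the simplified AP[2:1] access model.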
719
720
721Fault
722TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
723 ThreadContext *tc)
724{
725 assert(aarch64);
726
727 Addr vaddr_tainted = req->getVaddr();
728 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
729
730 uint32_t flags = req->getFlags();
731 bool is_fetch = (mode == Execute);
732 bool is_write = (mode == Write);
733 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
734
735 updateMiscReg(tc, curTranType);
736
737 // If this is the second stage of translation and the request is for a
738 // stage 1 page table walk then we need to check the HCR.PTW bit. This
739 // allows us to generate a fault if the request targets an area marked
740 // as a device or strongly ordered.
741 if (isStage2 && req->isPTWalk() && hcr.ptw &&
742 (te->mtype != TlbEntry::MemoryType::Normal)) {
743 return new DataAbort(vaddr_tainted, te->domain, is_write,
744 ArmFault::PermissionLL + te->lookupLevel,
745 isStage2, ArmFault::LpaeTran);
746 }
747
748 // Generate an alignment fault for unaligned accesses to device or
749 // strongly ordered memory
750 if (!is_fetch) {
751 if (te->mtype != TlbEntry::MemoryType::Normal) {
752 if (vaddr & mask(flags & AlignmentMask)) {
753 alignFaults++;
754 return new DataAbort(vaddr_tainted,
755 TlbEntry::DomainType::NoAccess, is_write,
756 ArmFault::AlignmentFault, isStage2,
757 ArmFault::LpaeTran);
758 }
759 }
760 }
761
762 if (te->nonCacheable) {
763 // Prevent prefetching from I/O devices.
764 if (req->isPrefetch()) {
765 // Here we can safely use the fault status for the short
766 // desc. format in all cases
767 return new PrefetchAbort(vaddr_tainted,
768 ArmFault::PrefetchUncacheable,
769 isStage2, ArmFault::LpaeTran);
770 }
771 }
772
773 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
774 bool grant = false;
775
776 uint8_t xn = te->xn;
777 uint8_t pxn = te->pxn;
778 bool r = !is_write && !is_fetch;
779 bool w = is_write;
780 bool x = is_fetch;
781 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
782 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
783
784 if (isStage2) {
785 panic("Virtualization in AArch64 state is not supported yet");
786 } else {
787 switch (aarch64EL) {
788 case EL0:
789 {
790 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
791 switch (perm) {
792 case 0:
793 case 1:
794 case 8:
795 case 9:
796 grant = x;
797 break;
798 case 4:
799 case 5:
800 grant = r || w || (x && !sctlr.wxn);
801 break;
802 case 6:
803 case 7:
804 grant = r || w;
805 break;
806 case 12:
807 case 13:
808 grant = r || x;
809 break;
810 case 14:
811 case 15:
812 grant = r;
813 break;
814 default:
815 grant = false;
816 }
817 }
818 break;
819 case EL1:
820 {
821 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
822 switch (perm) {
823 case 0:
824 case 2:
825 grant = r || w || (x && !sctlr.wxn);
826 break;
827 case 1:
828 case 3:
829 case 4:
830 case 5:
831 case 6:
832 case 7:
833 // regions that are writeable at EL0 should not be
834 // executable at EL1
835 grant = r || w;
836 break;
837 case 8:
838 case 10:
839 case 12:
840 case 14:
841 grant = r || x;
842 break;
843 case 9:
844 case 11:
845 case 13:
846 case 15:
847 grant = r;
848 break;
849 default:
850 grant = false;
851 }
852 }
853 break;
854 case EL2:
855 case EL3:
856 {
857 uint8_t perm = (ap & 0x2) | xn;
858 switch (perm) {
859 case 0:
860 grant = r || w || (x && !sctlr.wxn) ;
861 break;
862 case 1:
863 grant = r || w;
864 break;
865 case 2:
866 grant = r || x;
867 break;
868 case 3:
869 grant = r;
870 break;
871 default:
872 grant = false;
873 }
874 }
875 break;
876 }
877 }
878
879 if (!grant) {
880 if (is_fetch) {
881 permsFaults++;
882 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
883 "AP:%d priv:%d write:%d ns:%d sif:%d "
884 "sctlr.afe: %d\n",
885 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
886 // Use PC value instead of vaddr because vaddr might be aligned to
887 // cache line and should not be the address reported in FAR
888 return new PrefetchAbort(req->getPC(),
889 ArmFault::PermissionLL + te->lookupLevel,
890 isStage2, ArmFault::LpaeTran);
891 } else {
892 permsFaults++;
893 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
894 "priv:%d write:%d\n", ap, is_priv, is_write);
895 return new DataAbort(vaddr_tainted, te->domain, is_write,
896 ArmFault::PermissionLL + te->lookupLevel,
897 isStage2, ArmFault::LpaeTran);
898 }
899 }
900
901 return NoFault;
902}
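// Worked example of the AArch64 decode above (illustration only): perm packs
// the descriptor fields as (AP[2:1] << 2) | (XN << 1) | PXN. At EL0 a page
// with AP = 0b01 (EL0/EL1 read-write), XN = 0, PXN = 0 gives perm == 4, so
// grant = r || w || (x && !sctlr.wxn); the same page with AP = 0b00
// (EL1-only) gives perm == 0, where only an instruction fetch is granted.
// At EL2/EL3 only AP[2] and XN matter, e.g. perm == 3 (read-only,
// execute-never) grants reads only.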
903
904Fault
905TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
906 Translation *translation, bool &delay, bool timing,
907 TLB::ArmTranslationType tranType, bool functional)
908{
909 // No such thing as a functional timing access
910 assert(!(timing && functional));
911
912 updateMiscReg(tc, tranType);
913
914 Addr vaddr_tainted = req->getVaddr();
915 Addr vaddr = 0;
916 if (aarch64)
917 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
918 else
919 vaddr = vaddr_tainted;
920 uint32_t flags = req->getFlags();
921
922 bool is_fetch = (mode == Execute);
923 bool is_write = (mode == Write);
924 bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
925 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
926 : ArmFault::VmsaTran;
927
928 req->setAsid(asid);
929
930 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
931 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
932
933 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
934 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
935 scr, sctlr, flags, tranType);
936
937 // Generate an alignment fault for unaligned PC
938 if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
939 return new PCAlignmentFault(req->getPC());
940 }
941
942 // If this is a clrex instruction, provide a PA of 0 with no fault
943 // This will force the monitor to set the tracked address to 0
944 // a bit of a hack, but this effectively clears this processor's monitor
945 if (flags & Request::CLEAR_LL){
946 // @todo: check implications of security extensions
947 req->setPaddr(0);
948 req->setFlags(Request::UNCACHEABLE);
949 req->setFlags(Request::CLEAR_LL);
950 return NoFault;
951 }
952 if ((req->isInstFetch() && (!sctlr.i)) ||
953 ((!req->isInstFetch()) && (!sctlr.c))){
954 req->setFlags(Request::UNCACHEABLE);
955 }
956 if (!is_fetch) {
957 assert(flags & MustBeOne);
958 if (sctlr.a || !(flags & AllowUnaligned)) {
959 if (vaddr & mask(flags & AlignmentMask)) {
960 alignFaults++;
961 return new DataAbort(vaddr_tainted,
962 TlbEntry::DomainType::NoAccess, is_write,
963 ArmFault::AlignmentFault, isStage2,
964 tranMethod);
965 }
966 }
967 }
968
969 // If guest MMU is off or hcr.vm=0 go straight to stage2
970 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
971
972 req->setPaddr(vaddr);
973 // When the MMU is off the security attribute corresponds to the
974 // security state of the processor
975 if (isSecure)
976 req->setFlags(Request::SECURE);
977
978 // @todo: double check this (ARM ARM issue C B3.2.1)
979 if (long_desc_format || sctlr.tre == 0) {
980 req->setFlags(Request::UNCACHEABLE);
981 } else {
982 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
983 req->setFlags(Request::UNCACHEABLE);
984 }
985
986 // Set memory attributes
987 TlbEntry temp_te;
988 temp_te.ns = !isSecure;
989 if (isStage2 || hcr.dc == 0 || isSecure ||
990 (isHyp && !(tranType & S1CTran))) {
991
992 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
993 : TlbEntry::MemoryType::StronglyOrdered;
994 temp_te.innerAttrs = 0x0;
995 temp_te.outerAttrs = 0x0;
996 temp_te.shareable = true;
997 temp_te.outerShareable = true;
998 } else {
999 temp_te.mtype = TlbEntry::MemoryType::Normal;
1000 temp_te.innerAttrs = 0x3;
1001 temp_te.outerAttrs = 0x3;
1002 temp_te.shareable = false;
1003 temp_te.outerShareable = false;
1004 }
1005 temp_te.setAttributes(long_desc_format);
1006 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable:\
1007 %d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1008 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1009 isStage2);
1010 setAttr(temp_te.attributes);
1011
1012 return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
1013 }
1014
1015 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1016 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1017 // Translation enabled
1018
1019 TlbEntry *te = NULL;
1020 TlbEntry mergeTe;
1021 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1022 functional, &mergeTe);
1023 // only proceed if we have a valid table entry
1024 if ((te == NULL) && (fault == NoFault)) delay = true;
1025
1026 // If we have the table entry transfer some of the attributes to the
1027 // request that triggered the translation
1028 if (te != NULL) {
1029 // Set memory attributes
1030 DPRINTF(TLBVerbose,
1031 "Setting memory attributes: shareable: %d, innerAttrs: %d, \
1032 outerAttrs: %d, mtype: %d, isStage2: %d\n",
1033 te->shareable, te->innerAttrs, te->outerAttrs,
1034 static_cast<uint8_t>(te->mtype), isStage2);
1035 setAttr(te->attributes);
1036 if (te->nonCacheable) {
1037 req->setFlags(Request::UNCACHEABLE);
1038 }
1039
1040 if (!bootUncacheability &&
1041 ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
1042 req->setFlags(Request::UNCACHEABLE);
1043 }
1044
1045 req->setPaddr(te->pAddr(vaddr));
1046 if (isSecure && !te->ns) {
1047 req->setFlags(Request::SECURE);
1048 }
1049 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1050 (te->mtype != TlbEntry::MemoryType::Normal)) {
1051 // Unaligned accesses to Device memory should always cause an
1052 // abort regardless of sctlr.a
1053 alignFaults++;
1054 return new DataAbort(vaddr_tainted,
1055 TlbEntry::DomainType::NoAccess, is_write,
1056 ArmFault::AlignmentFault, isStage2,
1057 tranMethod);
1058 }
1059
1060 // Check for a trickbox generated address fault
1061 if (fault == NoFault) {
1062 fault = trickBoxCheck(req, mode, te->domain);
1063 }
1064 }
1065
1066 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1067 if (fault == NoFault) {
1068 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1069 if (aarch64 && is_fetch && cpsr.il == 1) {
1070 return new IllegalInstSetStateFault();
1071 }
1072 }
1073
1074 return fault;
1075}
1076
1077Fault
1078TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1079 TLB::ArmTranslationType tranType)
1080{
1081 updateMiscReg(tc, tranType);
1082
1083 if (directToStage2) {
1084 assert(stage2Tlb);
1085 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1086 }
1087
1088 bool delay = false;
1089 Fault fault;
1090 if (FullSystem)
1091 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1092 else
1093 fault = translateSe(req, tc, mode, NULL, delay, false);
1094 assert(!delay);
1095 return fault;
1096}
1097
1098Fault
1099TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1100 TLB::ArmTranslationType tranType)
1101{
1102 updateMiscReg(tc, tranType);
1103
1104 if (directToStage2) {
1105 assert(stage2Tlb);
1106 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1107 }
1108
1109 bool delay = false;
1110 Fault fault;
1111 if (FullSystem)
1112 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1113 else
1114 fault = translateSe(req, tc, mode, NULL, delay, false);
1115 assert(!delay);
1116 return fault;
1117}
1118
1119Fault
1120TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1121 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1122{
1123 updateMiscReg(tc, tranType);
1124
1125 if (directToStage2) {
1126 assert(stage2Tlb);
1127 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1128 }
1129
1130 assert(translation);
1131
1132 return translateComplete(req, tc, translation, mode, tranType, isStage2);
1133}
1134
1135Fault
1136TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1137 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1138 bool callFromS2)
1139{
1140 bool delay = false;
1141 Fault fault;
1142 if (FullSystem)
1143 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1144 else
1145 fault = translateSe(req, tc, mode, translation, delay, true);
1146 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1147 NoFault);
1148 // If we have a translation, and we're not in the middle of doing a stage
1149 // 2 translation, tell the translation that we've either finished or it's
1150 // going to take a while. By not doing this when we're in the middle of a
1151 // stage 2 translation we prevent marking the translation as delayed twice,
1152 // once when the translation starts and again when the stage 1 translation
1153 // completes.
1154 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1155 if (!delay)
1156 translation->finish(fault, req, tc, mode);
1157 else
1158 translation->markDelayed();
1159 }
1160 return fault;
1161}
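// Sketch of the completion protocol above (illustration, not a definitive
// description of the Translation interface): on a timing-mode TLB miss,
// translateFs() leaves delay set, so markDelayed() is called here and
// finish() is expected to be invoked later, once the table walk completes;
// on a hit, delay stays false and finish() is called immediately with the
// fault (or NoFault) produced by the lookup and permission checks.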
1162
1163BaseMasterPort*
1164TLB::getMasterPort()
1165{
1166 return &tableWalker->getMasterPort("port");
1167}
1168
1169DmaPort&
1170TLB::getWalkerPort()
1171{
1172 return tableWalker->getWalkerPort();
1173}
1174
1175void
1176TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1177{
1178 // Check if the regs have changed, or the translation mode is different.
1179 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1180 // one type of translation anyway
1181 if (miscRegValid && ((tranType == curTranType) || isStage2)) {
1182 return;
1183 }
1184
1185 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1186 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1187 // Dependencies: SCR/SCR_EL3, CPSR
1188 isSecure = inSecureState(tc);
1189 isSecure &= (tranType & HypMode) == 0;
1190 isSecure &= (tranType & S1S2NsTran) == 0;
1191 aarch64 = !cpsr.width;
1192 if (aarch64) { // AArch64
1193 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1194 switch (aarch64EL) {
1195 case EL0:
1196 case EL1:
1197 {
1198 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1199 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1200 uint64_t ttbr_asid = ttbcr.a1 ?
1201 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1202 tc->readMiscReg(MISCREG_TTBR0_EL1);
1203 asid = bits(ttbr_asid,
1204 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1205 }
1206 break;
1207 case EL2:
1208 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1209 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1210 asid = -1;
1211 break;
1212 case EL3:
1213 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1214 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1215 asid = -1;
1216 break;
1217 }
1218 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1219 isPriv = aarch64EL != EL0;
1220 // @todo: modify this behaviour to support Virtualization in
1221 // AArch64
1222 vmid = 0;
1223 isHyp = false;
1224 directToStage2 = false;
1225 stage2Req = false;
1226 } else { // AArch32
1227 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1228 !isSecure));
1229 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1230 !isSecure));
1231 scr = tc->readMiscReg(MISCREG_SCR);
1232 isPriv = cpsr.mode != MODE_USER;
1233 if (haveLPAE && ttbcr.eae) {
1234 // Long-descriptor translation table format in use
1235 uint64_t ttbr_asid = tc->readMiscReg(
1236 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1237 : MISCREG_TTBR0,
1238 tc, !isSecure));
1239 asid = bits(ttbr_asid, 55, 48);
1240 } else {
1241 // Short-descriptor translation table format in use
1242 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1243 MISCREG_CONTEXTIDR, tc,!isSecure));
1244 asid = context_id.asid;
1245 }
1246 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1247 !isSecure));
1248 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1249 !isSecure));
1250 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1251 !isSecure));
1252 hcr = tc->readMiscReg(MISCREG_HCR);
1253
1254 if (haveVirtualization) {
1255 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1256 isHyp = cpsr.mode == MODE_HYP;
1257 isHyp |= tranType & HypMode;
1258 isHyp &= (tranType & S1S2NsTran) == 0;
1259 isHyp &= (tranType & S1CTran) == 0;
1260 if (isHyp) {
1261 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1262 }
1263 // Work out if we should skip the first stage of translation and go
1264 // directly to stage 2. This value is cached so we don't have to
1265 // compute it for every translation.
1266 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1267 !(tranType & S1CTran);
1268 directToStage2 = stage2Req && !sctlr.m;
1269 } else {
1270 vmid = 0;
1271 stage2Req = false;
1272 isHyp = false;
1273 directToStage2 = false;
1274 }
1275 }
1276 miscRegValid = true;
1277 curTranType = tranType;
1278}
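// Worked example of the ASID extraction above (illustration only): in
// AArch64, bits(ttbr_asid, 63, 48) selects the full 16-bit ASID when
// haveLargeAsid64 && TCR.AS, otherwise bits(ttbr_asid, 55, 48) keeps only
// the low 8 bits; e.g. TTBR0_EL1 = 0x00a5000000001000 yields asid == 0x00a5
// with 16-bit ASIDs and asid == 0xa5 with 8-bit ASIDs.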
1279
1280Fault
1281TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1282 Translation *translation, bool timing, bool functional,
1283 bool is_secure, TLB::ArmTranslationType tranType)
1284{
1285 bool is_fetch = (mode == Execute);
1286 bool is_write = (mode == Write);
1287
1288 Addr vaddr_tainted = req->getVaddr();
1289 Addr vaddr = 0;
1290 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1291 if (aarch64) {
1292 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
1293 } else {
1294 vaddr = vaddr_tainted;
1295 }
1296 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1297 if (*te == NULL) {
1298 if (req->isPrefetch()) {
1299 // if the request is a prefetch, don't attempt to fill the TLB or go
1300 // any further with the memory access (here we can safely use the
1301 // fault status for the short desc. format in all cases)
1302 prefetchFaults++;
1303 return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1304 }
1305
1306 if (is_fetch)
1307 instMisses++;
1308 else if (is_write)
1309 writeMisses++;
1310 else
1311 readMisses++;
1312
1313 // start translation table walk, pass variables rather than
1314 // re-retrieving them in the table walker, for speed
1315 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1316 vaddr_tainted, asid, vmid);
1317 Fault fault;
1318 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1319 translation, timing, functional, is_secure,
1320 tranType);
1321 // for timing mode, return and wait for table walk,
1322 if (timing || fault != NoFault) {
1323 return fault;
1324 }
1325
1326 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1327 if (!*te)
1328 printTlb();
1329 assert(*te);
1330 } else {
1331 if (is_fetch)
1332 instHits++;
1333 else if (is_write)
1334 writeHits++;
1335 else
1336 readHits++;
1337 }
1338 return NoFault;
1339}
1340
1341Fault
1342TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1343 Translation *translation, bool timing, bool functional,
1344 TlbEntry *mergeTe)
1345{
1346 Fault fault;
1347 TlbEntry *s1Te = NULL;
1348
1349 Addr vaddr_tainted = req->getVaddr();
1350
1351 // Get the stage 1 table entry
1352 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1353 isSecure, curTranType);
1354 // only proceed if we have a valid table entry
1355 if ((s1Te != NULL) && (fault == NoFault)) {
1356 // Check stage 1 permissions before checking stage 2
1357 if (aarch64)
1358 fault = checkPermissions64(s1Te, req, mode, tc);
1359 else
1360 fault = checkPermissions(s1Te, req, mode);
1361 if (stage2Req & (fault == NoFault)) {
1362 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1363 req, translation, mode, timing, functional, curTranType);
1364 fault = s2Lookup->getTe(tc, mergeTe);
1365 if (s2Lookup->isComplete()) {
1366 *te = mergeTe;
1367 // We've finished with the lookup so delete it
1368 delete s2Lookup;
1369 } else {
1370 // The lookup hasn't completed, so we can't delete it now. We
1371 // get round this by asking the object to self delete when the
1372 // translation is complete.
1373 s2Lookup->setSelfDelete();
1374 }
1375 } else {
1376 // This case deals with an S1 hit (or bypass), followed by
1377 // an S2 hit-but-perms issue
1378 if (isStage2) {
1379 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1380 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1381 if (fault != NoFault) {
1382 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1383 armFault->annotate(ArmFault::S1PTW, false);
1384 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1385 }
1386 }
1387 *te = s1Te;
1388 }
1389 }
1390 return fault;
1391}
1392
1393ArmISA::TLB *
1394ArmTLBParams::create()
1395{
1396 return new ArmISA::TLB(this);
1397}