tlb.cc (11577:a26a328c20eb → 11580:afe051c345e9)
/*
 * Copyright (c) 2010-2013, 2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Search the table, which is kept in approximate LRU order
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
                                           target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry to the head when its position is
            // beyond rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

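// Replacement-policy note: index 0 is the MRU slot and index size-1 is the
// LRU victim. A hit beyond rangeMRU promotes the entry to the head by
// shifting the intervening entries down one slot. Roughly, with
// rangeMRU == 1 and a hit on F at index 5:
//
//   before: [A B C D E F ...]
//   after:  [F A B C D E ...]
//
// Functional lookups deliberately skip the promotion so that debugger and
// other functional accesses do not perturb the simulated replacement state.
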
// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // inserting to MRU position and evicting the LRU one

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

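// Note: both insert() and a promoting lookup() shift entries with an
// O(size) copy loop. For the small, fully-associative table modelled here
// that is simple and fast enough; a much larger TLB model would likely
// want a different structure (an observation, not something this file
// implements).
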
void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

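// _flushMva() relies on lookup() to find matching entries one at a time and
// loops until no match remains, which handles multiple entries covering the
// same MVA (e.g. a global and a non-global mapping). When ignore_asn is set
// the asn argument is ignored by the match, so flushMva() passes the 0xbeef
// value above purely as a recognisable dummy.
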
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

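// EL matching, in effect: EL2 and EL3 entries belong to their own
// translation regimes and must match exactly, while EL0 and EL1 share the
// EL1&0 regime and match each other:
//
//   target_el = 0 or 1  ->  matches entries with el == 0 or 1
//   target_el = 2       ->  matches entries with el == 2 only
//   target_el = 3       ->  matches entries with el == 3 only
//
// ignore_el (used by some flush operations) bypasses the check entirely.
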
void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;

        /* Sync the stage-2 TLBs if they exist in both the old CPU and
         * the new one.
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

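// Checkpoint note: the entry count is serialized explicitly, and restore
// iterates over min(size, num_entries), so a checkpoint taken with a larger
// TLB can still be restored into a smaller one (extra entries are simply
// dropped; restoring a smaller checkpoint leaves the trailing entries as
// they were).
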
void
TLB::regStats()
{
    BaseTLB::regStats();
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

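// The six assignments at the end of regStats() make the access and
// aggregate hit/miss statistics formula stats: they are derived from the
// scalar hit/miss counters rather than incremented separately, so the
// totals stay consistent with the per-class counts by construction.
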
void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return NoFault;
}

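// Syscall-emulation (SE) note: translateSe() never consults the TLB or the
// table walker. After the alignment check it maps the address through the
// process's functional page table, so SE mode sees no TLB misses, walks, or
// permission faults, only a GenericPageTableFault on an unmapped address.
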
Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return std::make_shared<PrefetchAbort>(
                    vaddr,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1'b0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check; we still do the
    // HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return std::make_shared<PrefetchAbort>(
            vaddr,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}

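// Worked example of the short-descriptor AP check above (illustrative):
// with sctlr.afe == 0 and an entry whose AP[2:0] == 2 ("privileged
// read-write, user read-only"), a user-mode write gives
// abt = !is_priv && is_write == true and the access data-aborts, while a
// privileged write is allowed but leaves isWritable == true, so sctlr.wxn
// can still force the page execute-never through the xn term above.
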

Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        // D4-1802
        uint8_t hap = 0x3 & te->hap;
        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = hap & 0x1;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

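// AArch64 permission decode above, in brief: for EL0/EL1 the switch key is
// perm = (ap << 2) | (xn << 1) | pxn, i.e. the 2-bit AP field with the
// execute-never and privileged-execute-never bits appended. For example at
// EL0, perm == 4 or 5 (AP == 01, xn == 0) is the unprivileged read/write
// case, so the access is granted for r, w, or x unless sctlr.wxn forbids
// executing from writable memory. EL2/EL3 key only on AP[2] and xn, since
// there is no unprivileged accessor in those regimes.
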
Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If stage-2 translation is disabled (hcr.vm == 0), or this is a
    // stage-1 TLB and the MMU is off (sctlr.m == 0), use the address
    // untranslated
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable)
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
        }
    }

    return fault;
}

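// translateFs() handles the two full-system cases: with translation
// disabled it fabricates a temporary TlbEntry purely to derive memory
// attributes for the flat mapping, and with translation enabled it defers
// to getResultTe(), which may kick off a table walk. A NULL te together
// with NoFault means the walk is still in flight and the caller sees
// delay == true.
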
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed
    // twice: once when the translation starts and again when the stage 1
    // translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

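// Contract with the caller, roughly: a timing translation is answered
// either immediately via translation->finish() or first flagged with
// markDelayed() and finished later when the walk completes. The
// callFromS2 / stage2Req guard above keeps a two-stage translation from
// signalling the translation object at both stages, per the comment in
// translateComplete().
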
BaseMasterPort*
TLB::getMasterPort()
{
    return &stage2Mmu->getPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
    aarch64 = opModeIs64(op_mode) ||
        (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (tranType) {
          case S1E0Tran:
          case S12E0Tran:
            aarch64EL = EL0;
            break;
          case S1E1Tran:
          case S12E1Tran:
            aarch64EL = EL1;
            break;
          case S1E2Tran:
            aarch64EL = EL2;
            break;
          case S1E3Tran:
            aarch64EL = EL3;
            break;
          case NormalTran:
          case S1CTran:
          case S1S2NsTran:
          case HypMode:
            aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
            break;
        }

        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran));  // <--- FIX THIS HACK
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            isHyp = false;
            directToStage2 = false;
            stage2Req = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                                       !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                                       !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {  // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                                                      !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                                                      !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                                                      !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}

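// updateMiscReg() is the per-translation context refresh: the decoded
// CPSR/SCTLR/TCR/... state is cached and only recomputed when the context
// ID or the requested translation type changes (miscRegValid is the cache
// valid bit, cleared by drainResume()). Stage-2 TLBs skip the tranType
// comparison since they only ever perform one kind of translation.
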
Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
1/*
2 * Copyright (c) 2010-2013, 2016 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45#include "arch/arm/tlb.hh"
46
47#include <memory>
48#include <string>
49#include <vector>
50
51#include "arch/arm/faults.hh"
52#include "arch/arm/pagetable.hh"
53#include "arch/arm/system.hh"
54#include "arch/arm/table_walker.hh"
55#include "arch/arm/stage2_lookup.hh"
56#include "arch/arm/stage2_mmu.hh"
57#include "arch/arm/utility.hh"
58#include "base/inifile.hh"
59#include "base/str.hh"
60#include "base/trace.hh"
61#include "cpu/base.hh"
62#include "cpu/thread_context.hh"
63#include "debug/Checkpoint.hh"
64#include "debug/TLB.hh"
65#include "debug/TLBVerbose.hh"
66#include "mem/page_table.hh"
67#include "params/ArmTLB.hh"
68#include "sim/full_system.hh"
69#include "sim/process.hh"
70
71using namespace std;
72using namespace ArmISA;
73
74TLB::TLB(const ArmTLBParams *p)
75 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
76 isStage2(p->is_stage2), stage2Req(false), _attr(0),
77 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
78 stage2Mmu(NULL), test(nullptr), rangeMRU(1),
79 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
80 isHyp(false), asid(0), vmid(0), dacr(0),
81 miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
82{
83 tableWalker->setTlb(this);
84
85 // Cache system-level properties
86 haveLPAE = tableWalker->haveLPAE();
87 haveVirtualization = tableWalker->haveVirtualization();
88 haveLargeAsid64 = tableWalker->haveLargeAsid64();
89}
90
91TLB::~TLB()
92{
93 delete[] table;
94}
95
96void
97TLB::init()
98{
99 if (stage2Mmu && !isStage2)
100 stage2Tlb = stage2Mmu->stage2Tlb();
101}
102
103void
104TLB::setMMU(Stage2MMU *m, MasterID master_id)
105{
106 stage2Mmu = m;
107 tableWalker->setMMU(m, master_id);
108}
109
110bool
111TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
112{
113 updateMiscReg(tc);
114
115 if (directToStage2) {
116 assert(stage2Tlb);
117 return stage2Tlb->translateFunctional(tc, va, pa);
118 }
119
120 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
121 aarch64 ? aarch64EL : EL1);
122 if (!e)
123 return false;
124 pa = e->pAddr(va);
125 return true;
126}
127
128Fault
129TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
130{
131 return NoFault;
132}
133
134TlbEntry*
135TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
136 bool functional, bool ignore_asn, uint8_t target_el)
137{
138
139 TlbEntry *retval = NULL;
140
141 // Maintaining LRU array
142 int x = 0;
143 while (retval == NULL && x < size) {
144 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
145 target_el)) ||
146 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
147 // We only move the hit entry ahead when the position is higher
148 // than rangeMRU
149 if (x > rangeMRU && !functional) {
150 TlbEntry tmp_entry = table[x];
151 for (int i = x; i > 0; i--)
152 table[i] = table[i - 1];
153 table[0] = tmp_entry;
154 retval = &table[0];
155 } else {
156 retval = &table[x];
157 }
158 break;
159 }
160 ++x;
161 }
162
163 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
164 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
165 "el: %d\n",
166 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
167 retval ? retval->pfn : 0, retval ? retval->size : 0,
168 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
169 retval ? retval->ns : 0, retval ? retval->nstid : 0,
170 retval ? retval->global : 0, retval ? retval->asid : 0,
171 retval ? retval->el : 0);
172
173 return retval;
174}
175
176// insert a new TLB entry
177void
178TLB::insert(Addr addr, TlbEntry &entry)
179{
180 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
181 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
182 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
183 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
184 entry.global, entry.valid, entry.nonCacheable, entry.xn,
185 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
186 entry.isHyp);
187
188 if (table[size - 1].valid)
189 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
190 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
191 table[size-1].vpn << table[size-1].N, table[size-1].asid,
192 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
193 table[size-1].size, table[size-1].ap, table[size-1].ns,
194 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
195 table[size-1].el);
196
197 //inserting to MRU position and evicting the LRU one
198
199 for (int i = size - 1; i > 0; --i)
200 table[i] = table[i-1];
201 table[0] = entry;
202
203 inserts++;
204 ppRefills->notify(1);
205}
206
207void
208TLB::printTlb() const
209{
210 int x = 0;
211 TlbEntry *te;
212 DPRINTF(TLB, "Current TLB contents:\n");
213 while (x < size) {
214 te = &table[x];
215 if (te->valid)
216 DPRINTF(TLB, " * %s\n", te->print());
217 ++x;
218 }
219}
220
221void
222TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
223{
224 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
225 (secure_lookup ? "secure" : "non-secure"));
226 int x = 0;
227 TlbEntry *te;
228 while (x < size) {
229 te = &table[x];
230 if (te->valid && secure_lookup == !te->nstid &&
231 (te->vmid == vmid || secure_lookup) &&
232 checkELMatch(target_el, te->el, ignore_el)) {
233
234 DPRINTF(TLB, " - %s\n", te->print());
235 te->valid = false;
236 flushedEntries++;
237 }
238 ++x;
239 }
240
241 flushTlb++;
242
243 // If there's a second stage TLB (and we're not it) then flush it as well
244 // if we're currently in hyp mode
245 if (!isStage2 && isHyp) {
246 stage2Tlb->flushAllSecurity(secure_lookup, true);
247 }
248}
249
250void
251TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
252{
253 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
254 (hyp ? "hyp" : "non-hyp"));
255 int x = 0;
256 TlbEntry *te;
257 while (x < size) {
258 te = &table[x];
259 if (te->valid && te->nstid && te->isHyp == hyp &&
260 checkELMatch(target_el, te->el, ignore_el)) {
261
262 DPRINTF(TLB, " - %s\n", te->print());
263 flushedEntries++;
264 te->valid = false;
265 }
266 ++x;
267 }
268
269 flushTlb++;
270
271 // If there's a second stage TLB (and we're not it) then flush it as well
272 if (!isStage2 && !hyp) {
273 stage2Tlb->flushAllNs(false, true);
274 }
275}
276
277void
278TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
279{
280 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
281 "(%s lookup)\n", mva, asn, (secure_lookup ?
282 "secure" : "non-secure"));
283 _flushMva(mva, asn, secure_lookup, false, false, target_el);
284 flushTlbMvaAsid++;
285}
286
287void
288TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
289{
290 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
291 (secure_lookup ? "secure" : "non-secure"));
292
293 int x = 0 ;
294 TlbEntry *te;
295
296 while (x < size) {
297 te = &table[x];
298 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
299 (te->vmid == vmid || secure_lookup) &&
300 checkELMatch(target_el, te->el, false)) {
301
302 te->valid = false;
303 DPRINTF(TLB, " - %s\n", te->print());
304 flushedEntries++;
305 }
306 ++x;
307 }
308 flushTlbAsid++;
309}
310
311void
312TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
313{
314 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
315 (secure_lookup ? "secure" : "non-secure"));
316 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
317 flushTlbMva++;
318}
319
320void
321TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
322 bool ignore_asn, uint8_t target_el)
323{
324 TlbEntry *te;
325 // D5.7.2: Sign-extend address to 64 bits
326 mva = sext<56>(mva);
327 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
328 target_el);
329 while (te != NULL) {
330 if (secure_lookup == !te->nstid) {
331 DPRINTF(TLB, " - %s\n", te->print());
332 te->valid = false;
333 flushedEntries++;
334 }
335 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
336 target_el);
337 }
338}
339
340bool
341TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
342{
343 bool elMatch = true;
344 if (!ignore_el) {
345 if (target_el == 2 || target_el == 3) {
346 elMatch = (tentry_el == target_el);
347 } else {
348 elMatch = (tentry_el == 0) || (tentry_el == 1);
349 }
350 }
351 return elMatch;
352}
353
354void
355TLB::drainResume()
356{
357 // We might have unserialized something or switched CPUs, so make
358 // sure to re-read the misc regs.
359 miscRegValid = false;
360}
361
362void
363TLB::takeOverFrom(BaseTLB *_otlb)
364{
365 TLB *otlb = dynamic_cast<TLB*>(_otlb);
366 /* Make sure we actually have a valid type */
367 if (otlb) {
368 _attr = otlb->_attr;
369 haveLPAE = otlb->haveLPAE;
370 directToStage2 = otlb->directToStage2;
371 stage2Req = otlb->stage2Req;
372
373 /* Sync the stage2 MMU if they exist in both
374 * the old CPU and the new
375 */
376 if (!isStage2 &&
377 stage2Tlb && otlb->stage2Tlb) {
378 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
379 }
380 } else {
381 panic("Incompatible TLB type!");
382 }
383}
384
385void
386TLB::serialize(CheckpointOut &cp) const
387{
388 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
389
390 SERIALIZE_SCALAR(_attr);
391 SERIALIZE_SCALAR(haveLPAE);
392 SERIALIZE_SCALAR(directToStage2);
393 SERIALIZE_SCALAR(stage2Req);
394
395 int num_entries = size;
396 SERIALIZE_SCALAR(num_entries);
397 for (int i = 0; i < size; i++)
398 table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
399}
400
401void
402TLB::unserialize(CheckpointIn &cp)
403{
404 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
405
406 UNSERIALIZE_SCALAR(_attr);
407 UNSERIALIZE_SCALAR(haveLPAE);
408 UNSERIALIZE_SCALAR(directToStage2);
409 UNSERIALIZE_SCALAR(stage2Req);
410
411 int num_entries;
412 UNSERIALIZE_SCALAR(num_entries);
413 for (int i = 0; i < min(size, num_entries); i++)
414 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
415}
416
417void
418TLB::regStats()
419{
420 BaseTLB::regStats();
421 instHits
422 .name(name() + ".inst_hits")
423 .desc("ITB inst hits")
424 ;
425
426 instMisses
427 .name(name() + ".inst_misses")
428 .desc("ITB inst misses")
429 ;
430
431 instAccesses
432 .name(name() + ".inst_accesses")
433 .desc("ITB inst accesses")
434 ;
435
436 readHits
437 .name(name() + ".read_hits")
438 .desc("DTB read hits")
439 ;
440
441 readMisses
442 .name(name() + ".read_misses")
443 .desc("DTB read misses")
444 ;
445
446 readAccesses
447 .name(name() + ".read_accesses")
448 .desc("DTB read accesses")
449 ;
450
451 writeHits
452 .name(name() + ".write_hits")
453 .desc("DTB write hits")
454 ;
455
456 writeMisses
457 .name(name() + ".write_misses")
458 .desc("DTB write misses")
459 ;
460
461 writeAccesses
462 .name(name() + ".write_accesses")
463 .desc("DTB write accesses")
464 ;
465
466 hits
467 .name(name() + ".hits")
468 .desc("DTB hits")
469 ;
470
471 misses
472 .name(name() + ".misses")
473 .desc("DTB misses")
474 ;
475
476 accesses
477 .name(name() + ".accesses")
478 .desc("DTB accesses")
479 ;
480
481 flushTlb
482 .name(name() + ".flush_tlb")
483 .desc("Number of times complete TLB was flushed")
484 ;
485
486 flushTlbMva
487 .name(name() + ".flush_tlb_mva")
488 .desc("Number of times TLB was flushed by MVA")
489 ;
490
491 flushTlbMvaAsid
492 .name(name() + ".flush_tlb_mva_asid")
493 .desc("Number of times TLB was flushed by MVA & ASID")
494 ;
495
496 flushTlbAsid
497 .name(name() + ".flush_tlb_asid")
498 .desc("Number of times TLB was flushed by ASID")
499 ;
500
501 flushedEntries
502 .name(name() + ".flush_entries")
503 .desc("Number of entries that have been flushed from TLB")
504 ;
505
506 alignFaults
507 .name(name() + ".align_faults")
508 .desc("Number of TLB faults due to alignment restrictions")
509 ;
510
511 prefetchFaults
512 .name(name() + ".prefetch_faults")
513 .desc("Number of TLB faults due to prefetch")
514 ;
515
516 domainFaults
517 .name(name() + ".domain_faults")
518 .desc("Number of TLB faults due to domain restrictions")
519 ;
520
521 permsFaults
522 .name(name() + ".perms_faults")
523 .desc("Number of TLB faults due to permissions restrictions")
524 ;
525
526 instAccesses = instHits + instMisses;
527 readAccesses = readHits + readMisses;
528 writeAccesses = writeHits + writeMisses;
529 hits = readHits + writeHits + instHits;
530 misses = readMisses + writeMisses + instMisses;
531 accesses = readAccesses + writeAccesses + instAccesses;
532}
533
534void
535TLB::regProbePoints()
536{
537 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
538}
539
540Fault
541TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
542 Translation *translation, bool &delay, bool timing)
543{
544 updateMiscReg(tc);
545 Addr vaddr_tainted = req->getVaddr();
546 Addr vaddr = 0;
547 if (aarch64)
548 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
549 else
550 vaddr = vaddr_tainted;
551 uint32_t flags = req->getFlags();
552
553 bool is_fetch = (mode == Execute);
554 bool is_write = (mode == Write);
555
556 if (!is_fetch) {
557 assert(flags & MustBeOne);
558 if (sctlr.a || !(flags & AllowUnaligned)) {
559 if (vaddr & mask(flags & AlignmentMask)) {
560 // LPAE is always disabled in SE mode
561 return std::make_shared<DataAbort>(
562 vaddr_tainted,
563 TlbEntry::DomainType::NoAccess, is_write,
564 ArmFault::AlignmentFault, isStage2,
565 ArmFault::VmsaTran);
566 }
567 }
568 }
569
570 Addr paddr;
571 Process *p = tc->getProcessPtr();
572
573 if (!p->pTable->translate(vaddr, paddr))
574 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
575 req->setPaddr(paddr);
576
577 return NoFault;
578}
579
580Fault
581TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
582{
583 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
584 uint32_t flags = req->getFlags();
585 bool is_fetch = (mode == Execute);
586 bool is_write = (mode == Write);
587 bool is_priv = isPriv && !(flags & UserMode);
588
589 // Get the translation type from the actuall table entry
590 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
591 : ArmFault::VmsaTran;
592
593 // If this is the second stage of translation and the request is for a
594 // stage 1 page table walk then we need to check the HCR.PTW bit. This
595 // allows us to generate a fault if the request targets an area marked
596 // as a device or strongly ordered.
597 if (isStage2 && req->isPTWalk() && hcr.ptw &&
598 (te->mtype != TlbEntry::MemoryType::Normal)) {
599 return std::make_shared<DataAbort>(
600 vaddr, te->domain, is_write,
601 ArmFault::PermissionLL + te->lookupLevel,
602 isStage2, tranMethod);
603 }
604
605 // Generate an alignment fault for unaligned data accesses to device or
606 // strongly ordered memory
607 if (!is_fetch) {
608 if (te->mtype != TlbEntry::MemoryType::Normal) {
609 if (vaddr & mask(flags & AlignmentMask)) {
610 alignFaults++;
611 return std::make_shared<DataAbort>(
612 vaddr, TlbEntry::DomainType::NoAccess, is_write,
613 ArmFault::AlignmentFault, isStage2,
614 tranMethod);
615 }
616 }
617 }
618
619 if (te->nonCacheable) {
620 // Prevent prefetching from I/O devices.
621 if (req->isPrefetch()) {
622 // Here we can safely use the fault status for the short
623 // desc. format in all cases
624 return std::make_shared<PrefetchAbort>(
625 vaddr, ArmFault::PrefetchUncacheable,
626 isStage2, tranMethod);
627 }
628 }
629
630 if (!te->longDescFormat) {
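// DACR assigns a 2-bit field to each domain: 0 = no access,
// 1 = client (permissions are checked), 2 = reserved (UNPREDICTABLE),
// 3 = manager (no permission checks).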
631 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
632 case 0:
633 domainFaults++;
634 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
635 " domain: %#x write:%d\n", dacr,
636 static_cast<uint8_t>(te->domain), is_write);
637 if (is_fetch)
638 return std::make_shared<PrefetchAbort>(
639 vaddr,
640 ArmFault::DomainLL + te->lookupLevel,
641 isStage2, tranMethod);
642 else
643 return std::make_shared<DataAbort>(
644 vaddr, te->domain, is_write,
645 ArmFault::DomainLL + te->lookupLevel,
646 isStage2, tranMethod);
647 case 1:
648 // Continue with permissions check
649 break;
650 case 2:
651 panic("UNPRED domain\n");
652 case 3:
653 return NoFault;
654 }
655 }
656
657 // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
658 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
659 uint8_t hap = te->hap;
660
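// With the access flag enabled (SCTLR.AFE) or the long-descriptor
// format, the simplified permission model is used and AP[0] is
// effectively always 1.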
661 if (sctlr.afe == 1 || te->longDescFormat)
662 ap |= 1;
663
664 bool abt;
665 bool isWritable = true;
666 // If this is a stage 2 access (e.g. for reading stage 1 page table
667 // entries) then don't perform the AP permissions check; we still do the
668 // HAP check below.
669 if (isStage2) {
670 abt = false;
671 } else {
672 switch (ap) {
673 case 0:
674 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
675 (int)sctlr.rs);
676 if (!sctlr.xp) {
677 switch ((int)sctlr.rs) {
678 case 2:
679 abt = is_write;
680 break;
681 case 1:
682 abt = is_write || !is_priv;
683 break;
684 case 0:
685 case 3:
686 default:
687 abt = true;
688 break;
689 }
690 } else {
691 abt = true;
692 }
693 break;
694 case 1:
695 abt = !is_priv;
696 break;
697 case 2:
698 abt = !is_priv && is_write;
699 isWritable = is_priv;
700 break;
701 case 3:
702 abt = false;
703 break;
704 case 4:
705 panic("UNPRED premissions\n");
706 case 5:
707 abt = !is_priv || is_write;
708 isWritable = false;
709 break;
710 case 6:
711 case 7:
712 abt = is_write;
713 isWritable = false;
714 break;
715 default:
716 panic("Unknown permissions %#x\n", ap);
717 }
718 }
719
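// HAP encodes the stage 2 access permissions: bit 0 grants reads,
// bit 1 grants writes.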
720 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
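// SCTLR.WXN makes writable regions execute-never; SCTLR.UWXN makes
// regions that are writable at EL0 execute-never for privileged code.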
721 bool xn = te->xn || (isWritable && sctlr.wxn) ||
722 (ap == 3 && sctlr.uwxn && is_priv);
723 if (is_fetch && (abt || xn ||
724 (te->longDescFormat && te->pxn && is_priv) ||
725 (isSecure && te->ns && scr.sif))) {
726 permsFaults++;
727 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
728 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
729 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
730 return std::make_shared<PrefetchAbort>(
731 vaddr,
732 ArmFault::PermissionLL + te->lookupLevel,
733 isStage2, tranMethod);
734 } else if (abt || hapAbt) {
735 permsFaults++;
736 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
737 " write:%d\n", ap, is_priv, is_write);
738 return std::make_shared<DataAbort>(
739 vaddr, te->domain, is_write,
740 ArmFault::PermissionLL + te->lookupLevel,
741 isStage2 || !abt, tranMethod);
742 }
743 return NoFault;
744}
745
746
747Fault
748TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
749 ThreadContext *tc)
750{
751 assert(aarch64);
752
753 Addr vaddr_tainted = req->getVaddr();
754 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
755
756 uint32_t flags = req->getFlags();
757 bool is_fetch = (mode == Execute);
758 bool is_write = (mode == Write);
759 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
760
761 updateMiscReg(tc, curTranType);
762
763 // If this is the second stage of translation and the request is for a
764 // stage 1 page table walk then we need to check the HCR.PTW bit. This
765 // allows us to generate a fault if the request targets an area marked
766 // as a device or strongly ordered.
767 if (isStage2 && req->isPTWalk() && hcr.ptw &&
768 (te->mtype != TlbEntry::MemoryType::Normal)) {
769 return std::make_shared<DataAbort>(
770 vaddr_tainted, te->domain, is_write,
771 ArmFault::PermissionLL + te->lookupLevel,
772 isStage2, ArmFault::LpaeTran);
773 }
774
775 // Generate an alignment fault for unaligned accesses to device or
776 // strongly ordered memory
777 if (!is_fetch) {
778 if (te->mtype != TlbEntry::MemoryType::Normal) {
779 if (vaddr & mask(flags & AlignmentMask)) {
780 alignFaults++;
781 return std::make_shared<DataAbort>(
782 vaddr_tainted,
783 TlbEntry::DomainType::NoAccess, is_write,
784 ArmFault::AlignmentFault, isStage2,
785 ArmFault::LpaeTran);
786 }
787 }
788 }
789
790 if (te->nonCacheable) {
791 // Prevent prefetching from I/O devices.
792 if (req->isPrefetch()) {
793 // Here we can safely use the fault status for the short
794 // desc. format in all cases
795 return std::make_shared<PrefetchAbort>(
796 vaddr_tainted,
797 ArmFault::PrefetchUncacheable,
798 isStage2, ArmFault::LpaeTran);
799 }
800 }
801
802 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
803 bool grant = false;
804
805 uint8_t xn = te->xn;
806 uint8_t pxn = te->pxn;
807 bool r = !is_write && !is_fetch;
808 bool w = is_write;
809 bool x = is_fetch;
810 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
811 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
812
813 if (isStage2) {
814 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
815 // In stage 2 we use the hypervisor access permission bits.
816 // The following permissions are described in ARM DDI 0487A.f
817 // D4-1802
818 uint8_t hap = 0x3 & te->hap;
819 if (is_fetch) {
820 // sctlr.wxn overrides the xn bit
821 grant = !sctlr.wxn && !xn;
822 } else if (is_write) {
823 grant = hap & 0x2;
824 } else { // is_read
825 grant = hap & 0x1;
826 }
827 } else {
828 switch (aarch64EL) {
829 case EL0:
830 {
831 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
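// perm packs {AP[2:1], XN, PXN} into a 4-bit index; the cases below
// follow the AArch64 stage 1 access permission encodings.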
832 switch (perm) {
833 case 0:
834 case 1:
835 case 8:
836 case 9:
837 grant = x;
838 break;
839 case 4:
840 case 5:
841 grant = r || w || (x && !sctlr.wxn);
842 break;
843 case 6:
844 case 7:
845 grant = r || w;
846 break;
847 case 12:
848 case 13:
849 grant = r || x;
850 break;
851 case 14:
852 case 15:
853 grant = r;
854 break;
855 default:
856 grant = false;
857 }
858 }
859 break;
860 case EL1:
861 {
862 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
863 switch (perm) {
864 case 0:
865 case 2:
866 grant = r || w || (x && !sctlr.wxn);
867 break;
868 case 1:
869 case 3:
870 case 4:
871 case 5:
872 case 6:
873 case 7:
874 // regions that are writeable at EL0 should not be
875 // executable at EL1
876 grant = r || w;
877 break;
878 case 8:
879 case 10:
880 case 12:
881 case 14:
882 grant = r || x;
883 break;
884 case 9:
885 case 11:
886 case 13:
887 case 15:
888 grant = r;
889 break;
890 default:
891 grant = false;
892 }
893 }
894 break;
895 case EL2:
896 case EL3:
897 {
898 uint8_t perm = (ap & 0x2) | xn;
899 switch (perm) {
900 case 0:
901 grant = r || w || (x && !sctlr.wxn);
902 break;
903 case 1:
904 grant = r || w;
905 break;
906 case 2:
907 grant = r || x;
908 break;
909 case 3:
910 grant = r;
911 break;
912 default:
913 grant = false;
914 }
915 }
916 break;
917 }
918 }
919
920 if (!grant) {
921 if (is_fetch) {
922 permsFaults++;
923 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
924 "AP:%d priv:%d write:%d ns:%d sif:%d "
925 "sctlr.afe: %d\n",
926 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
927 // Use PC value instead of vaddr because vaddr might be aligned to
928 // cache line and should not be the address reported in FAR
929 return std::make_shared<PrefetchAbort>(
930 req->getPC(),
931 ArmFault::PermissionLL + te->lookupLevel,
932 isStage2, ArmFault::LpaeTran);
933 } else {
934 permsFaults++;
935 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
936 "priv:%d write:%d\n", ap, is_priv, is_write);
937 return std::make_shared<DataAbort>(
938 vaddr_tainted, te->domain, is_write,
939 ArmFault::PermissionLL + te->lookupLevel,
940 isStage2, ArmFault::LpaeTran);
941 }
942 }
943
944 return NoFault;
945}
946
947Fault
948TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
949 Translation *translation, bool &delay, bool timing,
950 TLB::ArmTranslationType tranType, bool functional)
951{
952 // No such thing as a functional timing access
953 assert(!(timing && functional));
954
955 updateMiscReg(tc, tranType);
956
957 Addr vaddr_tainted = req->getVaddr();
958 Addr vaddr = 0;
959 if (aarch64)
960 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
961 else
962 vaddr = vaddr_tainted;
963 uint32_t flags = req->getFlags();
964
965 bool is_fetch = (mode == Execute);
966 bool is_write = (mode == Write);
967 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
968 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
969 : ArmFault::VmsaTran;
970
971 req->setAsid(asid);
972
973 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
974 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
975
976 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
977 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
978 scr, sctlr, flags, tranType);
979
980 if ((req->isInstFetch() && (!sctlr.i)) ||
981 ((!req->isInstFetch()) && (!sctlr.c))) {
982 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
983 }
984 if (!is_fetch) {
985 assert(flags & MustBeOne);
986 if (sctlr.a || !(flags & AllowUnaligned)) {
987 if (vaddr & mask(flags & AlignmentMask)) {
988 alignFaults++;
989 return std::make_shared<DataAbort>(
990 vaddr_tainted,
991 TlbEntry::DomainType::NoAccess, is_write,
992 ArmFault::AlignmentFault, isStage2,
993 tranMethod);
994 }
995 }
996 }
997
998 // If the MMU is off (SCTLR.M=0), or hcr.vm=0 for a stage 2 TLB, bypass translation
999 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1000
1001 req->setPaddr(vaddr);
1002 // When the MMU is off the security attribute corresponds to the
1003 // security state of the processor
1004 if (isSecure)
1005 req->setFlags(Request::SECURE);
1006
1007 // @todo: double check this (ARM ARM issue C B3.2.1)
1008 if (long_desc_format || sctlr.tre == 0) {
1009 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
1010 } else {
1011 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
1012 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
1013 }
1014
1015 // Set memory attributes
1016 TlbEntry temp_te;
1017 temp_te.ns = !isSecure;
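// With translation off, data accesses default to Strongly Ordered and
// fetches to Normal, unless HCR.DC forces Normal, cacheable attributes
// for non-secure EL0/EL1.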
1018 if (isStage2 || hcr.dc == 0 || isSecure ||
1019 (isHyp && !(tranType & S1CTran))) {
1020
1021 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1022 : TlbEntry::MemoryType::StronglyOrdered;
1023 temp_te.innerAttrs = 0x0;
1024 temp_te.outerAttrs = 0x0;
1025 temp_te.shareable = true;
1026 temp_te.outerShareable = true;
1027 } else {
1028 temp_te.mtype = TlbEntry::MemoryType::Normal;
1029 temp_te.innerAttrs = 0x3;
1030 temp_te.outerAttrs = 0x3;
1031 temp_te.shareable = false;
1032 temp_te.outerShareable = false;
1033 }
1034 temp_te.setAttributes(long_desc_format);
1035 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1036 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1037 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1038 isStage2);
1039 setAttr(temp_te.attributes);
1040
1041 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1042 }
1043
1044 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1045 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1046 // Translation enabled
1047
1048 TlbEntry *te = NULL;
1049 TlbEntry mergeTe;
1050 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1051 functional, &mergeTe);
1052 // only proceed if we have a valid table entry
1053 if ((te == NULL) && (fault == NoFault)) delay = true;
1054
1055 // If we have the table entry transfer some of the attributes to the
1056 // request that triggered the translation
1057 if (te != NULL) {
1058 // Set memory attributes
1059 DPRINTF(TLBVerbose,
1060 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1061 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1062 te->shareable, te->innerAttrs, te->outerAttrs,
1063 static_cast<uint8_t>(te->mtype), isStage2);
1064 setAttr(te->attributes);
1065
1066 if (te->nonCacheable)
1067 req->setFlags(Request::UNCACHEABLE);
1068
1069 // Require requests to be ordered if the request goes to
1070 // strongly ordered or device memory (i.e., anything other
1071 // than normal memory requires strict order).
1072 if (te->mtype != TlbEntry::MemoryType::Normal)
1073 req->setFlags(Request::STRICT_ORDER);
1074
1075 Addr pa = te->pAddr(vaddr);
1076 req->setPaddr(pa);
1077
1078 if (isSecure && !te->ns) {
1079 req->setFlags(Request::SECURE);
1080 }
1081 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1082 (te->mtype != TlbEntry::MemoryType::Normal)) {
1083 // Unaligned accesses to Device memory should always cause an
1084 // abort regardless of sctlr.a
1085 alignFaults++;
1086 return std::make_shared<DataAbort>(
1087 vaddr_tainted,
1088 TlbEntry::DomainType::NoAccess, is_write,
1089 ArmFault::AlignmentFault, isStage2,
1090 tranMethod);
1091 }
1092
1093 // Check for a trickbox generated address fault
1094 if (fault == NoFault)
1095 fault = testTranslation(req, mode, te->domain);
1096 }
1097
1098 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1099 if (fault == NoFault) {
1100 if (aarch64 && is_fetch && cpsr.il == 1) {
1101 return std::make_shared<IllegalInstSetStateFault>();
1102 }
1103 }
1104
1105 return fault;
1106}
1107
1108Fault
1109TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1110 TLB::ArmTranslationType tranType)
1111{
1112 updateMiscReg(tc, tranType);
1113
1114 if (directToStage2) {
1115 assert(stage2Tlb);
1116 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1117 }
1118
1119 bool delay = false;
1120 Fault fault;
1121 if (FullSystem)
1122 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1123 else
1124 fault = translateSe(req, tc, mode, NULL, delay, false);
1125 assert(!delay);
1126 return fault;
1127}
1128
1129Fault
1130TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1131 TLB::ArmTranslationType tranType)
1132{
1133 updateMiscReg(tc, tranType);
1134
1135 if (directToStage2) {
1136 assert(stage2Tlb);
1137 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1138 }
1139
1140 bool delay = false;
1141 Fault fault;
1142 if (FullSystem)
1143 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1144 else
1145 fault = translateSe(req, tc, mode, NULL, delay, false);
1146 assert(!delay);
1147 return fault;
1148}
1149
1150Fault
1151TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1152 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1153{
1154 updateMiscReg(tc, tranType);
1155
1156 if (directToStage2) {
1157 assert(stage2Tlb);
1158 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1159 }
1160
1161 assert(translation);
1162
1163 return translateComplete(req, tc, translation, mode, tranType, isStage2);
1164}
1165
1166Fault
1167TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1168 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1169 bool callFromS2)
1170{
1171 bool delay = false;
1172 Fault fault;
1173 if (FullSystem)
1174 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1175 else
1176 fault = translateSe(req, tc, mode, translation, delay, true);
1177 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n",
1178 delay, fault != NoFault);
1179 // If we have a translation, and we're not in the middle of doing a stage
1180 // 2 translation, tell the translation that we've either finished or it's
1181 // going to take a while. By not doing this when we're in the middle of a
1182 // stage 2 translation we prevent marking the translation as delayed twice,
1183 // once when the translation starts and again when the stage 1 translation
1184 // completes.
1185 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1186 if (!delay)
1187 translation->finish(fault, req, tc, mode);
1188 else
1189 translation->markDelayed();
1190 }
1191 return fault;
1192}
1193
1194BaseMasterPort*
1195TLB::getMasterPort()
1196{
1197 return &stage2Mmu->getPort();
1198}
1199
1200void
1201TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1202{
1203 // check if the regs have changed, or the translation mode is different.
1204 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1205 // one type of translation anyway
1206 if (miscRegValid && miscRegContext == tc->contextId() &&
1207 ((tranType == curTranType) || isStage2)) {
1208 return;
1209 }
1210
1211 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1212 cpsr = tc->readMiscReg(MISCREG_CPSR);
1213
1214 // Dependencies: SCR/SCR_EL3, CPSR
1215 isSecure = inSecureState(tc) &&
1216 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1217
1218 const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
1219 aarch64 = opModeIs64(op_mode) ||
1220 (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));
1221
1222 if (aarch64) { // AArch64
1223 // determine EL we need to translate in
1224 switch (tranType) {
1225 case S1E0Tran:
1226 case S12E0Tran:
1227 aarch64EL = EL0;
1228 break;
1229 case S1E1Tran:
1230 case S12E1Tran:
1231 aarch64EL = EL1;
1232 break;
1233 case S1E2Tran:
1234 aarch64EL = EL2;
1235 break;
1236 case S1E3Tran:
1237 aarch64EL = EL3;
1238 break;
1239 case NormalTran:
1240 case S1CTran:
1241 case S1S2NsTran:
1242 case HypMode:
1243 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1244 break;
1245 }
1246
1247 switch (aarch64EL) {
1248 case EL0:
1249 case EL1:
1250 {
1251 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1252 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1253 uint64_t ttbr_asid = ttbcr.a1 ?
1254 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1255 tc->readMiscReg(MISCREG_TTBR0_EL1);
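// The ASID is held in TTBRx_EL1[63:48]; all 16 bits are used only
// when the implementation supports large ASIDs and TCR.AS is set,
// otherwise just bits [55:48].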
1256 asid = bits(ttbr_asid,
1257 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1258 }
1259 break;
1260 case EL2:
1261 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1262 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1263 asid = -1;
1264 break;
1265 case EL3:
1266 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1267 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1268 asid = -1;
1269 break;
1270 }
1271 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1272 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1273 isPriv = aarch64EL != EL0;
1274 if (haveVirtualization) {
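// The current VMID is held in VTTBR_EL2[55:48].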
1275 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1276 isHyp = tranType & HypMode;
1277 isHyp &= (tranType & S1S2NsTran) == 0;
1278 isHyp &= (tranType & S1CTran) == 0;
1279 // Work out if we should skip the first stage of translation and go
1280 // directly to stage 2. This value is cached so we don't have to
1281 // compute it for every translation.
1282 stage2Req = isStage2 ||
1283 (hcr.vm && !isHyp && !isSecure &&
1284 !(tranType & S1CTran) && (aarch64EL < EL2) &&
1285 !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1286 directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1287 } else {
1288 vmid = 0;
1289 isHyp = false;
1290 directToStage2 = false;
1291 stage2Req = false;
1292 }
1293 } else { // AArch32
1294 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1295 !isSecure));
1296 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1297 !isSecure));
1298 scr = tc->readMiscReg(MISCREG_SCR);
1299 isPriv = cpsr.mode != MODE_USER;
1300 if (longDescFormatInUse(tc)) {
1301 uint64_t ttbr_asid = tc->readMiscReg(
1302 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1303 : MISCREG_TTBR0,
1304 tc, !isSecure));
1305 asid = bits(ttbr_asid, 55, 48);
1306 } else { // Short-descriptor translation table format in use
1307 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1308 MISCREG_CONTEXTIDR, tc, !isSecure));
1309 asid = context_id.asid;
1310 }
1311 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1312 !isSecure));
1313 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1314 !isSecure));
1315 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1316 !isSecure));
1317 hcr = tc->readMiscReg(MISCREG_HCR);
1318
1319 if (haveVirtualization) {
1320 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1321 isHyp = cpsr.mode == MODE_HYP;
1322 isHyp |= tranType & HypMode;
1323 isHyp &= (tranType & S1S2NsTran) == 0;
1324 isHyp &= (tranType & S1CTran) == 0;
1325 if (isHyp) {
1326 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1327 }
1328 // Work out if we should skip the first stage of translation and go
1329 // directly to stage 2. This value is cached so we don't have to
1330 // compute it for every translation.
1331 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1332 !(tranType & S1CTran);
1333 directToStage2 = stage2Req && !sctlr.m;
1334 } else {
1335 vmid = 0;
1336 stage2Req = false;
1337 isHyp = false;
1338 directToStage2 = false;
1339 }
1340 }
1341 miscRegValid = true;
1342 miscRegContext = tc->contextId();
1343 curTranType = tranType;
1344}
1345
1346Fault
1347TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1348 Translation *translation, bool timing, bool functional,
1349 bool is_secure, TLB::ArmTranslationType tranType)
1350{
1351 bool is_fetch = (mode == Execute);
1352 bool is_write = (mode == Write);
1353
1354 Addr vaddr_tainted = req->getVaddr();
1355 Addr vaddr = 0;
1356 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1357 if (aarch64) {
1358 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1359 } else {
1360 vaddr = vaddr_tainted;
1361 }
1362 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1363 if (*te == NULL) {
1364 if (req->isPrefetch()) {
1365 // if the request is a prefetch don't attempt to fill the TLB or go
1366 // any further with the memory access (here we can safely use the
1367 // fault status for the short desc. format in all cases)
1368 prefetchFaults++;
1369 return std::make_shared<PrefetchAbort>(
1370 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1371 }
1372
1373 if (is_fetch)
1374 instMisses++;
1375 else if (is_write)
1376 writeMisses++;
1377 else
1378 readMisses++;
1379
1380 // start translation table walk, pass variables rather than
1381 // re-retrieving them in the table walker, for speed
1382 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1383 vaddr_tainted, asid, vmid);
1384 Fault fault;
1385 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1386 translation, timing, functional, is_secure,
1387 tranType, stage2Req);
1388 // for timing mode, return and wait for table walk,
1389 if (timing || fault != NoFault) {
1390 return fault;
1391 }
1392
1393 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1394 if (!*te)
1395 printTlb();
1396 assert(*te);
1397 } else {
1398 if (is_fetch)
1399 instHits++;
1400 else if (is_write)
1401 writeHits++;
1402 else
1403 readHits++;
1404 }
1405 return NoFault;
1406}
1407
1408Fault
1409TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1410 Translation *translation, bool timing, bool functional,
1411 TlbEntry *mergeTe)
1412{
1413 Fault fault;
1414
1415 if (isStage2) {
1416 // We are already in the stage 2 TLB. Grab the table entry for stage
1417 // 2 only. We are here because stage 1 translation is disabled.
1418 TlbEntry *s2Te = NULL;
1419 // Get the stage 2 table entry
1420 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1421 isSecure, curTranType);
1422 // Check permissions of stage 2
1423 if ((s2Te != NULL) && (fault == NoFault)) {
1424 if (aarch64)
1425 fault = checkPermissions64(s2Te, req, mode, tc);
1426 else
1427 fault = checkPermissions(s2Te, req, mode);
1428 }
1429 *te = s2Te;
1430 return fault;
1431 }
1432
1433 TlbEntry *s1Te = NULL;
1434
1435 Addr vaddr_tainted = req->getVaddr();
1436
1437 // Get the stage 1 table entry
1438 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1439 isSecure, curTranType);
1440 // only proceed if we have a valid table entry
1441 if ((s1Te != NULL) && (fault == NoFault)) {
1442 // Check stage 1 permissions before checking stage 2
1443 if (aarch64)
1444 fault = checkPermissions64(s1Te, req, mode, tc);
1445 else
1446 fault = checkPermissions(s1Te, req, mode);
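// Even after a stage 1 hit, a stage 2 lookup is needed when
// virtualization is active; Stage2LookUp merges the permissions and
// attributes of both stages into mergeTe.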
1447 if (stage2Req && (fault == NoFault)) {
1448 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1449 req, translation, mode, timing, functional, curTranType);
1450 fault = s2Lookup->getTe(tc, mergeTe);
1451 if (s2Lookup->isComplete()) {
1452 *te = mergeTe;
1453 // We've finished with the lookup so delete it
1454 delete s2Lookup;
1455 } else {
1456 // The lookup hasn't completed, so we can't delete it now. We
1457 // get round this by asking the object to self delete when the
1458 // translation is complete.
1459 s2Lookup->setSelfDelete();
1460 }
1461 } else {
1462 // This case deals with an S1 hit (or bypass), followed by
1463 // an S2 hit-but-perms issue
1464 if (isStage2) {
1465 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1466 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1467 if (fault != NoFault) {
1468 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1469 armFault->annotate(ArmFault::S1PTW, false);
1470 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1471 }
1472 }
1473 *te = s1Te;
1474 }
1475 }
1476 return fault;
1477}
1478
1479void
1480TLB::setTestInterface(SimObject *_ti)
1481{
1482 if (!_ti) {
1483 test = nullptr;
1484 } else {
1485 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1486 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1487 test = ti;
1488 }
1489}
1490
1491Fault
1492TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
1493{
1494 if (!test || !req->hasSize() || req->getSize() == 0) {
1495 return NoFault;
1496 } else {
1497 return test->translationCheck(req, isPriv, mode, domain);
1498 }
1499}
1500
1501Fault
1502TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1503 TlbEntry::DomainType domain, LookupLevel lookup_level)
1504{
1505 if (!test) {
1506 return NoFault;
1507 } else {
1508 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1509 domain, lookup_level);
1510 }
1511}
1512
1513
1514ArmISA::TLB *
1515ArmTLBParams::create()
1516{
1517 return new ArmISA::TLB(this);
1518}