tlb.cc (12356:e56e838c47cb) tlb.cc (12406:86bde4a026b5)
1/*
2 * Copyright (c) 2010-2013, 2016-2017 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45#include "arch/arm/tlb.hh"
46
47#include <memory>
48#include <string>
49#include <vector>
50
51#include "arch/arm/faults.hh"
52#include "arch/arm/pagetable.hh"
53#include "arch/arm/stage2_lookup.hh"
54#include "arch/arm/stage2_mmu.hh"
55#include "arch/arm/system.hh"
56#include "arch/arm/table_walker.hh"
57#include "arch/arm/utility.hh"
58#include "arch/generic/mmapped_ipr.hh"
59#include "base/inifile.hh"
60#include "base/str.hh"
61#include "base/trace.hh"
62#include "cpu/base.hh"
63#include "cpu/thread_context.hh"
64#include "debug/Checkpoint.hh"
65#include "debug/TLB.hh"
66#include "debug/TLBVerbose.hh"
67#include "mem/page_table.hh"
68#include "mem/request.hh"
69#include "params/ArmTLB.hh"
70#include "sim/full_system.hh"
71#include "sim/process.hh"
72
73using namespace std;
74using namespace ArmISA;
75
76TLB::TLB(const ArmTLBParams *p)
77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
78 isStage2(p->is_stage2), stage2Req(false), _attr(0),
79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
80 stage2Mmu(NULL), test(nullptr), rangeMRU(1),
81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
82 isHyp(false), asid(0), vmid(0), dacr(0),
83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
84{
85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
86
87 tableWalker->setTlb(this);
88
89 // Cache system-level properties
90 haveLPAE = tableWalker->haveLPAE();
91 haveVirtualization = tableWalker->haveVirtualization();
92 haveLargeAsid64 = tableWalker->haveLargeAsid64();
93
94 if (sys)
95 m5opRange = sys->m5opRange();
96}
97
98TLB::~TLB()
99{
100 delete[] table;
101}
102
103void
104TLB::init()
105{
106 if (stage2Mmu && !isStage2)
107 stage2Tlb = stage2Mmu->stage2Tlb();
108}
109
110void
111TLB::setMMU(Stage2MMU *m, MasterID master_id)
112{
113 stage2Mmu = m;
114 tableWalker->setMMU(m, master_id);
115}
116
117bool
118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
119{
120 updateMiscReg(tc);
121
122 if (directToStage2) {
123 assert(stage2Tlb);
124 return stage2Tlb->translateFunctional(tc, va, pa);
125 }
126
127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
128 aarch64 ? aarch64EL : EL1);
129 if (!e)
130 return false;
131 pa = e->pAddr(va);
132 return true;
133}
134
135Fault
136TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
137{
138 const Addr paddr = req->getPaddr();
139
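    // Addresses inside the m5op range are redirected to the generic
    // memory-mapped IPR (pseudo-instruction) interface; the low-order
    // address bits select the pseudo-op.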
140 if (m5opRange.contains(paddr)) {
141 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
142 req->setPaddr(GenericISA::iprAddressPseudoInst(
143 (paddr >> 8) & 0xFF,
144 paddr & 0xFF));
145 }
146
147 return NoFault;
148}
149
150TlbEntry*
151TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
152 bool functional, bool ignore_asn, uint8_t target_el)
153{
154
155 TlbEntry *retval = NULL;
156
157 // Maintaining LRU array
158 int x = 0;
159 while (retval == NULL && x < size) {
160 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
161 target_el)) ||
162 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
163 // We only move the hit entry ahead when the position is higher
164 // than rangeMRU
165 if (x > rangeMRU && !functional) {
166 TlbEntry tmp_entry = table[x];
167 for (int i = x; i > 0; i--)
168 table[i] = table[i - 1];
169 table[0] = tmp_entry;
170 retval = &table[0];
171 } else {
172 retval = &table[x];
173 }
174 break;
175 }
176 ++x;
177 }
178
179 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
180 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
181 "el: %d\n",
182 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
183 retval ? retval->pfn : 0, retval ? retval->size : 0,
184 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
185 retval ? retval->ns : 0, retval ? retval->nstid : 0,
186 retval ? retval->global : 0, retval ? retval->asid : 0,
187 retval ? retval->el : 0);
188
189 return retval;
190}
191
192// insert a new TLB entry
193void
194TLB::insert(Addr addr, TlbEntry &entry)
195{
196 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
197 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
198 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
199 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
200 entry.global, entry.valid, entry.nonCacheable, entry.xn,
201 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
202 entry.isHyp);
203
204 if (table[size - 1].valid)
205 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
206 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
207 table[size-1].vpn << table[size-1].N, table[size-1].asid,
208 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
209 table[size-1].size, table[size-1].ap, table[size-1].ns,
210 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
211 table[size-1].el);
212
 213 // Inserting at the MRU position and evicting the LRU entry
214
215 for (int i = size - 1; i > 0; --i)
216 table[i] = table[i-1];
217 table[0] = entry;
218
219 inserts++;
220 ppRefills->notify(1);
221}
222
223void
224TLB::printTlb() const
225{
226 int x = 0;
227 TlbEntry *te;
228 DPRINTF(TLB, "Current TLB contents:\n");
229 while (x < size) {
230 te = &table[x];
231 if (te->valid)
232 DPRINTF(TLB, " * %s\n", te->print());
233 ++x;
234 }
235}
236
237void
238TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
239{
240 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
241 (secure_lookup ? "secure" : "non-secure"));
242 int x = 0;
243 TlbEntry *te;
244 while (x < size) {
245 te = &table[x];
246 if (te->valid && secure_lookup == !te->nstid &&
247 (te->vmid == vmid || secure_lookup) &&
248 checkELMatch(target_el, te->el, ignore_el)) {
249
250 DPRINTF(TLB, " - %s\n", te->print());
251 te->valid = false;
252 flushedEntries++;
253 }
254 ++x;
255 }
256
257 flushTlb++;
258
259 // If there's a second stage TLB (and we're not it) then flush it as well
260 // if we're currently in hyp mode
261 if (!isStage2 && isHyp) {
262 stage2Tlb->flushAllSecurity(secure_lookup, true);
263 }
264}
265
266void
267TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
268{
269 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
270 (hyp ? "hyp" : "non-hyp"));
271 int x = 0;
272 TlbEntry *te;
273 while (x < size) {
274 te = &table[x];
275 if (te->valid && te->nstid && te->isHyp == hyp &&
276 checkELMatch(target_el, te->el, ignore_el)) {
277
278 DPRINTF(TLB, " - %s\n", te->print());
279 flushedEntries++;
280 te->valid = false;
281 }
282 ++x;
283 }
284
285 flushTlb++;
286
287 // If there's a second stage TLB (and we're not it) then flush it as well
288 if (!isStage2 && !hyp) {
289 stage2Tlb->flushAllNs(false, true);
290 }
291}
292
293void
294TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
295{
296 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
297 "(%s lookup)\n", mva, asn, (secure_lookup ?
298 "secure" : "non-secure"));
299 _flushMva(mva, asn, secure_lookup, false, false, target_el);
300 flushTlbMvaAsid++;
301}
302
303void
304TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
305{
306 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
307 (secure_lookup ? "secure" : "non-secure"));
308
 309 int x = 0;
310 TlbEntry *te;
311
312 while (x < size) {
313 te = &table[x];
314 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
315 (te->vmid == vmid || secure_lookup) &&
316 checkELMatch(target_el, te->el, false)) {
317
318 te->valid = false;
319 DPRINTF(TLB, " - %s\n", te->print());
320 flushedEntries++;
321 }
322 ++x;
323 }
324 flushTlbAsid++;
325}
326
327void
328TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
329{
330 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
331 (secure_lookup ? "secure" : "non-secure"));
332 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
333 flushTlbMva++;
334}
335
336void
337TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
338 bool ignore_asn, uint8_t target_el)
339{
340 TlbEntry *te;
341 // D5.7.2: Sign-extend address to 64 bits
342 mva = sext<56>(mva);
343 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
344 target_el);
345 while (te != NULL) {
346 if (secure_lookup == !te->nstid) {
347 DPRINTF(TLB, " - %s\n", te->print());
348 te->valid = false;
349 flushedEntries++;
350 }
351 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
352 target_el);
353 }
354}
355
356void
357TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
358{
359 assert(!isStage2);
360 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
361}
362
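// Returns true if a TLB entry's exception level matches the target EL of an
// operation: EL2 and EL3 entries must match exactly, while EL0 and EL1
// entries are treated as a single (EL1&0) translation regime.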
363bool
364TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
365{
366 bool elMatch = true;
367 if (!ignore_el) {
368 if (target_el == 2 || target_el == 3) {
369 elMatch = (tentry_el == target_el);
370 } else {
371 elMatch = (tentry_el == 0) || (tentry_el == 1);
372 }
373 }
374 return elMatch;
375}
376
377void
378TLB::drainResume()
379{
380 // We might have unserialized something or switched CPUs, so make
381 // sure to re-read the misc regs.
382 miscRegValid = false;
383}
384
385void
386TLB::takeOverFrom(BaseTLB *_otlb)
387{
388 TLB *otlb = dynamic_cast<TLB*>(_otlb);
389 /* Make sure we actually have a valid type */
390 if (otlb) {
391 _attr = otlb->_attr;
392 haveLPAE = otlb->haveLPAE;
393 directToStage2 = otlb->directToStage2;
394 stage2Req = otlb->stage2Req;
395
 396 /* Sync the stage-2 TLBs if they exist in both
 397 * the old and the new CPU
398 */
399 if (!isStage2 &&
400 stage2Tlb && otlb->stage2Tlb) {
401 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
402 }
403 } else {
404 panic("Incompatible TLB type!");
405 }
406}
407
408void
409TLB::serialize(CheckpointOut &cp) const
410{
411 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
412
413 SERIALIZE_SCALAR(_attr);
414 SERIALIZE_SCALAR(haveLPAE);
415 SERIALIZE_SCALAR(directToStage2);
416 SERIALIZE_SCALAR(stage2Req);
417
418 int num_entries = size;
419 SERIALIZE_SCALAR(num_entries);
420 for (int i = 0; i < size; i++)
421 table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
422}
423
424void
425TLB::unserialize(CheckpointIn &cp)
426{
427 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
428
429 UNSERIALIZE_SCALAR(_attr);
430 UNSERIALIZE_SCALAR(haveLPAE);
431 UNSERIALIZE_SCALAR(directToStage2);
432 UNSERIALIZE_SCALAR(stage2Req);
433
434 int num_entries;
435 UNSERIALIZE_SCALAR(num_entries);
436 for (int i = 0; i < min(size, num_entries); i++)
437 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
438}
439
440void
441TLB::regStats()
442{
443 BaseTLB::regStats();
444 instHits
445 .name(name() + ".inst_hits")
446 .desc("ITB inst hits")
447 ;
448
449 instMisses
450 .name(name() + ".inst_misses")
451 .desc("ITB inst misses")
452 ;
453
454 instAccesses
455 .name(name() + ".inst_accesses")
456 .desc("ITB inst accesses")
457 ;
458
459 readHits
460 .name(name() + ".read_hits")
461 .desc("DTB read hits")
462 ;
463
464 readMisses
465 .name(name() + ".read_misses")
466 .desc("DTB read misses")
467 ;
468
469 readAccesses
470 .name(name() + ".read_accesses")
471 .desc("DTB read accesses")
472 ;
473
474 writeHits
475 .name(name() + ".write_hits")
476 .desc("DTB write hits")
477 ;
478
479 writeMisses
480 .name(name() + ".write_misses")
481 .desc("DTB write misses")
482 ;
483
484 writeAccesses
485 .name(name() + ".write_accesses")
486 .desc("DTB write accesses")
487 ;
488
489 hits
490 .name(name() + ".hits")
491 .desc("DTB hits")
492 ;
493
494 misses
495 .name(name() + ".misses")
496 .desc("DTB misses")
497 ;
498
499 accesses
500 .name(name() + ".accesses")
501 .desc("DTB accesses")
502 ;
503
504 flushTlb
505 .name(name() + ".flush_tlb")
506 .desc("Number of times complete TLB was flushed")
507 ;
508
509 flushTlbMva
510 .name(name() + ".flush_tlb_mva")
511 .desc("Number of times TLB was flushed by MVA")
512 ;
513
514 flushTlbMvaAsid
515 .name(name() + ".flush_tlb_mva_asid")
516 .desc("Number of times TLB was flushed by MVA & ASID")
517 ;
518
519 flushTlbAsid
520 .name(name() + ".flush_tlb_asid")
521 .desc("Number of times TLB was flushed by ASID")
522 ;
523
524 flushedEntries
525 .name(name() + ".flush_entries")
526 .desc("Number of entries that have been flushed from TLB")
527 ;
528
529 alignFaults
530 .name(name() + ".align_faults")
531 .desc("Number of TLB faults due to alignment restrictions")
532 ;
533
534 prefetchFaults
535 .name(name() + ".prefetch_faults")
536 .desc("Number of TLB faults due to prefetch")
537 ;
538
539 domainFaults
540 .name(name() + ".domain_faults")
541 .desc("Number of TLB faults due to domain restrictions")
542 ;
543
544 permsFaults
545 .name(name() + ".perms_faults")
546 .desc("Number of TLB faults due to permissions restrictions")
547 ;
548
549 instAccesses = instHits + instMisses;
550 readAccesses = readHits + readMisses;
551 writeAccesses = writeHits + writeMisses;
552 hits = readHits + writeHits + instHits;
553 misses = readMisses + writeMisses + instMisses;
554 accesses = readAccesses + writeAccesses + instAccesses;
555}
556
557void
558TLB::regProbePoints()
559{
560 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
561}
562
563Fault
564TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
565 Translation *translation, bool &delay, bool timing)
566{
567 updateMiscReg(tc);
568 Addr vaddr_tainted = req->getVaddr();
569 Addr vaddr = 0;
570 if (aarch64)
571 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
572 else
573 vaddr = vaddr_tainted;
574 Request::Flags flags = req->getFlags();
575
576 bool is_fetch = (mode == Execute);
577 bool is_write = (mode == Write);
578
579 if (!is_fetch) {
580 assert(flags & MustBeOne);
581 if (sctlr.a || !(flags & AllowUnaligned)) {
582 if (vaddr & mask(flags & AlignmentMask)) {
583 // LPAE is always disabled in SE mode
584 return std::make_shared<DataAbort>(
585 vaddr_tainted,
586 TlbEntry::DomainType::NoAccess, is_write,
587 ArmFault::AlignmentFault, isStage2,
588 ArmFault::VmsaTran);
589 }
590 }
591 }
592
593 Addr paddr;
594 Process *p = tc->getProcessPtr();
595
596 if (!p->pTable->translate(vaddr, paddr))
597 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
598 req->setPaddr(paddr);
599
600 return finalizePhysical(req, tc, mode);
601}
602
603Fault
604TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
605{
 606 Addr vaddr = req->getVaddr(); // AArch32: the address doesn't need purifying
607 Request::Flags flags = req->getFlags();
608 bool is_fetch = (mode == Execute);
609 bool is_write = (mode == Write);
610 bool is_priv = isPriv && !(flags & UserMode);
611
 612 // Get the translation type from the actual table entry
613 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
614 : ArmFault::VmsaTran;
615
616 // If this is the second stage of translation and the request is for a
617 // stage 1 page table walk then we need to check the HCR.PTW bit. This
618 // allows us to generate a fault if the request targets an area marked
619 // as a device or strongly ordered.
620 if (isStage2 && req->isPTWalk() && hcr.ptw &&
621 (te->mtype != TlbEntry::MemoryType::Normal)) {
622 return std::make_shared<DataAbort>(
623 vaddr, te->domain, is_write,
624 ArmFault::PermissionLL + te->lookupLevel,
625 isStage2, tranMethod);
626 }
627
628 // Generate an alignment fault for unaligned data accesses to device or
629 // strongly ordered memory
630 if (!is_fetch) {
631 if (te->mtype != TlbEntry::MemoryType::Normal) {
632 if (vaddr & mask(flags & AlignmentMask)) {
633 alignFaults++;
634 return std::make_shared<DataAbort>(
635 vaddr, TlbEntry::DomainType::NoAccess, is_write,
636 ArmFault::AlignmentFault, isStage2,
637 tranMethod);
638 }
639 }
640 }
641
642 if (te->nonCacheable) {
643 // Prevent prefetching from I/O devices.
644 if (req->isPrefetch()) {
645 // Here we can safely use the fault status for the short
646 // desc. format in all cases
647 return std::make_shared<PrefetchAbort>(
648 vaddr, ArmFault::PrefetchUncacheable,
649 isStage2, tranMethod);
650 }
651 }
652
653 if (!te->longDescFormat) {
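        // Short-descriptor format: consult DACR. Each domain has a 2-bit
        // field: 0 = no access, 1 = client (check permissions), 2 = reserved
        // (UNPREDICTABLE), 3 = manager (permission checks are skipped).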
654 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
655 case 0:
656 domainFaults++;
657 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
658 " domain: %#x write:%d\n", dacr,
659 static_cast<uint8_t>(te->domain), is_write);
660 if (is_fetch) {
661 // Use PC value instead of vaddr because vaddr might
662 // be aligned to cache line and should not be the
663 // address reported in FAR
664 return std::make_shared<PrefetchAbort>(
665 req->getPC(),
666 ArmFault::DomainLL + te->lookupLevel,
667 isStage2, tranMethod);
668 } else
669 return std::make_shared<DataAbort>(
670 vaddr, te->domain, is_write,
671 ArmFault::DomainLL + te->lookupLevel,
672 isStage2, tranMethod);
673 case 1:
674 // Continue with permissions check
675 break;
676 case 2:
677 panic("UNPRED domain\n");
678 case 3:
679 return NoFault;
680 }
681 }
682
 683 // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
684 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
685 uint8_t hap = te->hap;
686
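    // With the Access flag enabled (SCTLR.AFE == 1) or the long-descriptor
    // format, AP[0] acts as an access flag rather than a permission bit, so
    // treat it as set for the permission check below.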
687 if (sctlr.afe == 1 || te->longDescFormat)
688 ap |= 1;
689
690 bool abt;
691 bool isWritable = true;
 692 // If this is a stage 2 access (e.g. for reading stage 1 page table entries)
 693 // then don't perform the AP permissions check; we still do the HAP check
694 // below.
695 if (isStage2) {
696 abt = false;
697 } else {
698 switch (ap) {
699 case 0:
700 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
701 (int)sctlr.rs);
702 if (!sctlr.xp) {
703 switch ((int)sctlr.rs) {
704 case 2:
705 abt = is_write;
706 break;
707 case 1:
708 abt = is_write || !is_priv;
709 break;
710 case 0:
711 case 3:
712 default:
713 abt = true;
714 break;
715 }
716 } else {
717 abt = true;
718 }
719 break;
720 case 1:
721 abt = !is_priv;
722 break;
723 case 2:
724 abt = !is_priv && is_write;
725 isWritable = is_priv;
726 break;
727 case 3:
728 abt = false;
729 break;
730 case 4:
731 panic("UNPRED premissions\n");
732 case 5:
733 abt = !is_priv || is_write;
734 isWritable = false;
735 break;
736 case 6:
737 case 7:
738 abt = is_write;
739 isWritable = false;
740 break;
741 default:
742 panic("Unknown permissions %#x\n", ap);
743 }
744 }
745
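    // Stage 2 (HAP) permissions: bit 0 grants reads, bit 1 grants writes.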
746 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
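    // SCTLR.WXN makes writable regions execute-never; SCTLR.UWXN does the
    // same for user-writable regions when executing at a privileged level.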
747 bool xn = te->xn || (isWritable && sctlr.wxn) ||
748 (ap == 3 && sctlr.uwxn && is_priv);
749 if (is_fetch && (abt || xn ||
750 (te->longDescFormat && te->pxn && is_priv) ||
751 (isSecure && te->ns && scr.sif))) {
752 permsFaults++;
753 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
754 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
755 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
756 // Use PC value instead of vaddr because vaddr might be aligned to
757 // cache line and should not be the address reported in FAR
758 return std::make_shared<PrefetchAbort>(
759 req->getPC(),
760 ArmFault::PermissionLL + te->lookupLevel,
761 isStage2, tranMethod);
762 } else if (abt | hapAbt) {
763 permsFaults++;
764 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
765 " write:%d\n", ap, is_priv, is_write);
766 return std::make_shared<DataAbort>(
767 vaddr, te->domain, is_write,
768 ArmFault::PermissionLL + te->lookupLevel,
769 isStage2 | !abt, tranMethod);
770 }
771 return NoFault;
772}
773
774
775Fault
776TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
777 ThreadContext *tc)
778{
779 assert(aarch64);
780
781 Addr vaddr_tainted = req->getVaddr();
782 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
783
784 Request::Flags flags = req->getFlags();
785 bool is_fetch = (mode == Execute);
786 bool is_write = (mode == Write);
787 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
788
789 updateMiscReg(tc, curTranType);
790
791 // If this is the second stage of translation and the request is for a
792 // stage 1 page table walk then we need to check the HCR.PTW bit. This
793 // allows us to generate a fault if the request targets an area marked
794 // as a device or strongly ordered.
795 if (isStage2 && req->isPTWalk() && hcr.ptw &&
796 (te->mtype != TlbEntry::MemoryType::Normal)) {
797 return std::make_shared<DataAbort>(
798 vaddr_tainted, te->domain, is_write,
799 ArmFault::PermissionLL + te->lookupLevel,
800 isStage2, ArmFault::LpaeTran);
801 }
802
803 // Generate an alignment fault for unaligned accesses to device or
804 // strongly ordered memory
805 if (!is_fetch) {
806 if (te->mtype != TlbEntry::MemoryType::Normal) {
807 if (vaddr & mask(flags & AlignmentMask)) {
808 alignFaults++;
809 return std::make_shared<DataAbort>(
810 vaddr_tainted,
811 TlbEntry::DomainType::NoAccess, is_write,
812 ArmFault::AlignmentFault, isStage2,
813 ArmFault::LpaeTran);
814 }
815 }
816 }
817
818 if (te->nonCacheable) {
819 // Prevent prefetching from I/O devices.
820 if (req->isPrefetch()) {
821 // Here we can safely use the fault status for the short
822 // desc. format in all cases
823 return std::make_shared<PrefetchAbort>(
824 vaddr_tainted,
825 ArmFault::PrefetchUncacheable,
826 isStage2, ArmFault::LpaeTran);
827 }
828 }
829
830 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
831 bool grant = false;
832
833 uint8_t xn = te->xn;
834 uint8_t pxn = te->pxn;
835 bool r = !is_write && !is_fetch;
836 bool w = is_write;
837 bool x = is_fetch;
838 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
839 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
840
841 if (isStage2) {
842 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
843 // In stage 2 we use the hypervisor access permission bits.
844 // The following permissions are described in ARM DDI 0487A.f
845 // D4-1802
846 uint8_t hap = 0x3 & te->hap;
847 if (is_fetch) {
848 // sctlr.wxn overrides the xn bit
849 grant = !sctlr.wxn && !xn;
850 } else if (is_write) {
851 grant = hap & 0x2;
852 } else { // is_read
853 grant = hap & 0x1;
854 }
855 } else {
856 switch (aarch64EL) {
857 case EL0:
858 {
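            // Build a 4-bit index from AP[2:1], XN and PXN to decode the
            // unprivileged (EL0) permissions.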
859 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
860 switch (perm) {
861 case 0:
862 case 1:
863 case 8:
864 case 9:
865 grant = x;
866 break;
867 case 4:
868 case 5:
869 grant = r || w || (x && !sctlr.wxn);
870 break;
871 case 6:
872 case 7:
873 grant = r || w;
874 break;
875 case 12:
876 case 13:
877 grant = r || x;
878 break;
879 case 14:
880 case 15:
881 grant = r;
882 break;
883 default:
884 grant = false;
885 }
886 }
887 break;
888 case EL1:
889 {
890 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
891 switch (perm) {
892 case 0:
893 case 2:
894 grant = r || w || (x && !sctlr.wxn);
895 break;
896 case 1:
897 case 3:
898 case 4:
899 case 5:
900 case 6:
901 case 7:
902 // regions that are writeable at EL0 should not be
903 // executable at EL1
904 grant = r || w;
905 break;
906 case 8:
907 case 10:
908 case 12:
909 case 14:
910 grant = r || x;
911 break;
912 case 9:
913 case 11:
914 case 13:
915 case 15:
916 grant = r;
917 break;
918 default:
919 grant = false;
920 }
921 }
922 break;
923 case EL2:
924 case EL3:
925 {
926 uint8_t perm = (ap & 0x2) | xn;
927 switch (perm) {
928 case 0:
 929 grant = r || w || (x && !sctlr.wxn);
930 break;
931 case 1:
932 grant = r || w;
933 break;
934 case 2:
935 grant = r || x;
936 break;
937 case 3:
938 grant = r;
939 break;
940 default:
941 grant = false;
942 }
943 }
944 break;
945 }
946 }
947
948 if (!grant) {
949 if (is_fetch) {
950 permsFaults++;
951 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
952 "AP:%d priv:%d write:%d ns:%d sif:%d "
953 "sctlr.afe: %d\n",
954 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
955 // Use PC value instead of vaddr because vaddr might be aligned to
956 // cache line and should not be the address reported in FAR
957 return std::make_shared<PrefetchAbort>(
958 req->getPC(),
959 ArmFault::PermissionLL + te->lookupLevel,
960 isStage2, ArmFault::LpaeTran);
961 } else {
962 permsFaults++;
963 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
964 "priv:%d write:%d\n", ap, is_priv, is_write);
965 return std::make_shared<DataAbort>(
966 vaddr_tainted, te->domain, is_write,
967 ArmFault::PermissionLL + te->lookupLevel,
968 isStage2, ArmFault::LpaeTran);
969 }
970 }
971
972 return NoFault;
973}
974
975Fault
976TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
977 Translation *translation, bool &delay, bool timing,
978 TLB::ArmTranslationType tranType, bool functional)
979{
980 // No such thing as a functional timing access
981 assert(!(timing && functional));
982
983 updateMiscReg(tc, tranType);
984
985 Addr vaddr_tainted = req->getVaddr();
986 Addr vaddr = 0;
987 if (aarch64)
988 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
989 else
990 vaddr = vaddr_tainted;
991 Request::Flags flags = req->getFlags();
992
993 bool is_fetch = (mode == Execute);
994 bool is_write = (mode == Write);
995 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
996 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
997 : ArmFault::VmsaTran;
998
999 req->setAsid(asid);
1000
1001 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1002 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1003
1004 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1005 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1006 scr, sctlr, flags, tranType);
1007
1008 if ((req->isInstFetch() && (!sctlr.i)) ||
1009 ((!req->isInstFetch()) && (!sctlr.c))){
1010 if (!req->isCacheMaintenance()) {
1011 req->setFlags(Request::UNCACHEABLE);
1012 }
1013 req->setFlags(Request::STRICT_ORDER);
1014 }
1015 if (!is_fetch) {
1016 assert(flags & MustBeOne);
1017 if (sctlr.a || !(flags & AllowUnaligned)) {
1018 if (vaddr & mask(flags & AlignmentMask)) {
1019 alignFaults++;
1020 return std::make_shared<DataAbort>(
1021 vaddr_tainted,
1022 TlbEntry::DomainType::NoAccess, is_write,
1023 ArmFault::AlignmentFault, isStage2,
1024 tranMethod);
1025 }
1026 }
1027 }
1028
 1029 // If translation is disabled for this stage (stage 1 with the MMU off, or stage 2 with hcr.vm=0) the VA is used as the PA directly
1030 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1031
1032 req->setPaddr(vaddr);
1033 // When the MMU is off the security attribute corresponds to the
1034 // security state of the processor
1035 if (isSecure)
1036 req->setFlags(Request::SECURE);
1037
1038 // @todo: double check this (ARM ARM issue C B3.2.1)
1039 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1040 nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1041 if (!req->isCacheMaintenance()) {
1042 req->setFlags(Request::UNCACHEABLE);
1043 }
1044 req->setFlags(Request::STRICT_ORDER);
1045 }
1046
1047 // Set memory attributes
1048 TlbEntry temp_te;
1049 temp_te.ns = !isSecure;
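        // With translation off, data accesses default to Strongly-ordered and
        // instruction fetches to Normal; HCR.DC can instead force Normal,
        // cacheable attributes for non-secure stage 1 accesses.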
1050 if (isStage2 || hcr.dc == 0 || isSecure ||
1051 (isHyp && !(tranType & S1CTran))) {
1052
1053 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1054 : TlbEntry::MemoryType::StronglyOrdered;
1055 temp_te.innerAttrs = 0x0;
1056 temp_te.outerAttrs = 0x0;
1057 temp_te.shareable = true;
1058 temp_te.outerShareable = true;
1059 } else {
1060 temp_te.mtype = TlbEntry::MemoryType::Normal;
1061 temp_te.innerAttrs = 0x3;
1062 temp_te.outerAttrs = 0x3;
1063 temp_te.shareable = false;
1064 temp_te.outerShareable = false;
1065 }
1066 temp_te.setAttributes(long_desc_format);
1067 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1068 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1069 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1070 isStage2);
1071 setAttr(temp_te.attributes);
1072
1073 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1074 }
1075
1076 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1077 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1078 // Translation enabled
1079
1080 TlbEntry *te = NULL;
1081 TlbEntry mergeTe;
1082 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1083 functional, &mergeTe);
1084 // only proceed if we have a valid table entry
1085 if ((te == NULL) && (fault == NoFault)) delay = true;
1086
1087 // If we have the table entry transfer some of the attributes to the
1088 // request that triggered the translation
1089 if (te != NULL) {
1090 // Set memory attributes
1091 DPRINTF(TLBVerbose,
1092 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1093 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1094 te->shareable, te->innerAttrs, te->outerAttrs,
1095 static_cast<uint8_t>(te->mtype), isStage2);
1096 setAttr(te->attributes);
1097
1098 if (te->nonCacheable && !req->isCacheMaintenance())
1099 req->setFlags(Request::UNCACHEABLE);
1100
1101 // Require requests to be ordered if the request goes to
1102 // strongly ordered or device memory (i.e., anything other
1103 // than normal memory requires strict order).
1104 if (te->mtype != TlbEntry::MemoryType::Normal)
1105 req->setFlags(Request::STRICT_ORDER);
1106
1107 Addr pa = te->pAddr(vaddr);
1108 req->setPaddr(pa);
1109
1110 if (isSecure && !te->ns) {
1111 req->setFlags(Request::SECURE);
1112 }
1113 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1114 (te->mtype != TlbEntry::MemoryType::Normal)) {
1115 // Unaligned accesses to Device memory should always cause an
1116 // abort regardless of sctlr.a
1117 alignFaults++;
1118 return std::make_shared<DataAbort>(
1119 vaddr_tainted,
1120 TlbEntry::DomainType::NoAccess, is_write,
1121 ArmFault::AlignmentFault, isStage2,
1122 tranMethod);
1123 }
1124
1125 // Check for a trickbox generated address fault
1126 if (fault == NoFault)
1127 fault = testTranslation(req, mode, te->domain);
1128 }
1129
1130 if (fault == NoFault) {
1131 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1132 if (aarch64 && is_fetch && cpsr.il == 1) {
1133 return std::make_shared<IllegalInstSetStateFault>();
1134 }
1135
1136 // Don't try to finalize a physical address unless the
1137 // translation has completed (i.e., there is a table entry).
1138 return te ? finalizePhysical(req, tc, mode) : NoFault;
1139 } else {
1140 return fault;
1141 }
1142}
1143
1144Fault
1145TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1146 TLB::ArmTranslationType tranType)
1147{
1148 updateMiscReg(tc, tranType);
1149
1150 if (directToStage2) {
1151 assert(stage2Tlb);
1152 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1153 }
1154
1155 bool delay = false;
1156 Fault fault;
1157 if (FullSystem)
1158 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1159 else
1160 fault = translateSe(req, tc, mode, NULL, delay, false);
1161 assert(!delay);
1162 return fault;
1163}
1164
1165Fault
1166TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1167 TLB::ArmTranslationType tranType)
1168{
1169 updateMiscReg(tc, tranType);
1170
1171 if (directToStage2) {
1172 assert(stage2Tlb);
1173 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1174 }
1175
1176 bool delay = false;
1177 Fault fault;
1178 if (FullSystem)
1179 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1180 else
1181 fault = translateSe(req, tc, mode, NULL, delay, false);
1182 assert(!delay);
1183 return fault;
1184}
1185
1/*
2 * Copyright (c) 2010-2013, 2016-2017 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45#include "arch/arm/tlb.hh"
46
47#include <memory>
48#include <string>
49#include <vector>
50
51#include "arch/arm/faults.hh"
52#include "arch/arm/pagetable.hh"
53#include "arch/arm/stage2_lookup.hh"
54#include "arch/arm/stage2_mmu.hh"
55#include "arch/arm/system.hh"
56#include "arch/arm/table_walker.hh"
57#include "arch/arm/utility.hh"
58#include "arch/generic/mmapped_ipr.hh"
59#include "base/inifile.hh"
60#include "base/str.hh"
61#include "base/trace.hh"
62#include "cpu/base.hh"
63#include "cpu/thread_context.hh"
64#include "debug/Checkpoint.hh"
65#include "debug/TLB.hh"
66#include "debug/TLBVerbose.hh"
67#include "mem/page_table.hh"
68#include "mem/request.hh"
69#include "params/ArmTLB.hh"
70#include "sim/full_system.hh"
71#include "sim/process.hh"
72
73using namespace std;
74using namespace ArmISA;
75
76TLB::TLB(const ArmTLBParams *p)
77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
78 isStage2(p->is_stage2), stage2Req(false), _attr(0),
79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
80 stage2Mmu(NULL), test(nullptr), rangeMRU(1),
81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
82 isHyp(false), asid(0), vmid(0), dacr(0),
83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
84{
85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
86
87 tableWalker->setTlb(this);
88
89 // Cache system-level properties
90 haveLPAE = tableWalker->haveLPAE();
91 haveVirtualization = tableWalker->haveVirtualization();
92 haveLargeAsid64 = tableWalker->haveLargeAsid64();
93
94 if (sys)
95 m5opRange = sys->m5opRange();
96}
97
98TLB::~TLB()
99{
100 delete[] table;
101}
102
103void
104TLB::init()
105{
106 if (stage2Mmu && !isStage2)
107 stage2Tlb = stage2Mmu->stage2Tlb();
108}
109
110void
111TLB::setMMU(Stage2MMU *m, MasterID master_id)
112{
113 stage2Mmu = m;
114 tableWalker->setMMU(m, master_id);
115}
116
117bool
118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
119{
120 updateMiscReg(tc);
121
122 if (directToStage2) {
123 assert(stage2Tlb);
124 return stage2Tlb->translateFunctional(tc, va, pa);
125 }
126
127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
128 aarch64 ? aarch64EL : EL1);
129 if (!e)
130 return false;
131 pa = e->pAddr(va);
132 return true;
133}
134
135Fault
136TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
137{
138 const Addr paddr = req->getPaddr();
139
140 if (m5opRange.contains(paddr)) {
141 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
142 req->setPaddr(GenericISA::iprAddressPseudoInst(
143 (paddr >> 8) & 0xFF,
144 paddr & 0xFF));
145 }
146
147 return NoFault;
148}
149
150TlbEntry*
151TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
152 bool functional, bool ignore_asn, uint8_t target_el)
153{
154
155 TlbEntry *retval = NULL;
156
157 // Maintaining LRU array
158 int x = 0;
159 while (retval == NULL && x < size) {
160 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
161 target_el)) ||
162 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
163 // We only move the hit entry ahead when the position is higher
164 // than rangeMRU
165 if (x > rangeMRU && !functional) {
166 TlbEntry tmp_entry = table[x];
167 for (int i = x; i > 0; i--)
168 table[i] = table[i - 1];
169 table[0] = tmp_entry;
170 retval = &table[0];
171 } else {
172 retval = &table[x];
173 }
174 break;
175 }
176 ++x;
177 }
178
179 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
180 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
181 "el: %d\n",
182 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
183 retval ? retval->pfn : 0, retval ? retval->size : 0,
184 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
185 retval ? retval->ns : 0, retval ? retval->nstid : 0,
186 retval ? retval->global : 0, retval ? retval->asid : 0,
187 retval ? retval->el : 0);
188
189 return retval;
190}
191
192// insert a new TLB entry
193void
194TLB::insert(Addr addr, TlbEntry &entry)
195{
196 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
197 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
198 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
199 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
200 entry.global, entry.valid, entry.nonCacheable, entry.xn,
201 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
202 entry.isHyp);
203
204 if (table[size - 1].valid)
205 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
206 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
207 table[size-1].vpn << table[size-1].N, table[size-1].asid,
208 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
209 table[size-1].size, table[size-1].ap, table[size-1].ns,
210 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
211 table[size-1].el);
212
213 //inserting to MRU position and evicting the LRU one
214
215 for (int i = size - 1; i > 0; --i)
216 table[i] = table[i-1];
217 table[0] = entry;
218
219 inserts++;
220 ppRefills->notify(1);
221}
222
223void
224TLB::printTlb() const
225{
226 int x = 0;
227 TlbEntry *te;
228 DPRINTF(TLB, "Current TLB contents:\n");
229 while (x < size) {
230 te = &table[x];
231 if (te->valid)
232 DPRINTF(TLB, " * %s\n", te->print());
233 ++x;
234 }
235}
236
237void
238TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
239{
240 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
241 (secure_lookup ? "secure" : "non-secure"));
242 int x = 0;
243 TlbEntry *te;
244 while (x < size) {
245 te = &table[x];
246 if (te->valid && secure_lookup == !te->nstid &&
247 (te->vmid == vmid || secure_lookup) &&
248 checkELMatch(target_el, te->el, ignore_el)) {
249
250 DPRINTF(TLB, " - %s\n", te->print());
251 te->valid = false;
252 flushedEntries++;
253 }
254 ++x;
255 }
256
257 flushTlb++;
258
259 // If there's a second stage TLB (and we're not it) then flush it as well
260 // if we're currently in hyp mode
261 if (!isStage2 && isHyp) {
262 stage2Tlb->flushAllSecurity(secure_lookup, true);
263 }
264}
265
266void
267TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
268{
269 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
270 (hyp ? "hyp" : "non-hyp"));
271 int x = 0;
272 TlbEntry *te;
273 while (x < size) {
274 te = &table[x];
275 if (te->valid && te->nstid && te->isHyp == hyp &&
276 checkELMatch(target_el, te->el, ignore_el)) {
277
278 DPRINTF(TLB, " - %s\n", te->print());
279 flushedEntries++;
280 te->valid = false;
281 }
282 ++x;
283 }
284
285 flushTlb++;
286
287 // If there's a second stage TLB (and we're not it) then flush it as well
288 if (!isStage2 && !hyp) {
289 stage2Tlb->flushAllNs(false, true);
290 }
291}
292
293void
294TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
295{
296 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
297 "(%s lookup)\n", mva, asn, (secure_lookup ?
298 "secure" : "non-secure"));
299 _flushMva(mva, asn, secure_lookup, false, false, target_el);
300 flushTlbMvaAsid++;
301}
302
303void
304TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
305{
306 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
307 (secure_lookup ? "secure" : "non-secure"));
308
309 int x = 0 ;
310 TlbEntry *te;
311
312 while (x < size) {
313 te = &table[x];
314 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
315 (te->vmid == vmid || secure_lookup) &&
316 checkELMatch(target_el, te->el, false)) {
317
318 te->valid = false;
319 DPRINTF(TLB, " - %s\n", te->print());
320 flushedEntries++;
321 }
322 ++x;
323 }
324 flushTlbAsid++;
325}
326
327void
328TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
329{
330 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
331 (secure_lookup ? "secure" : "non-secure"));
332 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
333 flushTlbMva++;
334}
335
336void
337TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
338 bool ignore_asn, uint8_t target_el)
339{
340 TlbEntry *te;
341 // D5.7.2: Sign-extend address to 64 bits
342 mva = sext<56>(mva);
343 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
344 target_el);
345 while (te != NULL) {
346 if (secure_lookup == !te->nstid) {
347 DPRINTF(TLB, " - %s\n", te->print());
348 te->valid = false;
349 flushedEntries++;
350 }
351 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
352 target_el);
353 }
354}
355
356void
357TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
358{
359 assert(!isStage2);
360 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
361}
362
363bool
364TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
365{
366 bool elMatch = true;
367 if (!ignore_el) {
368 if (target_el == 2 || target_el == 3) {
369 elMatch = (tentry_el == target_el);
370 } else {
371 elMatch = (tentry_el == 0) || (tentry_el == 1);
372 }
373 }
374 return elMatch;
375}
376
377void
378TLB::drainResume()
379{
380 // We might have unserialized something or switched CPUs, so make
381 // sure to re-read the misc regs.
382 miscRegValid = false;
383}
384
385void
386TLB::takeOverFrom(BaseTLB *_otlb)
387{
388 TLB *otlb = dynamic_cast<TLB*>(_otlb);
389 /* Make sure we actually have a valid type */
390 if (otlb) {
391 _attr = otlb->_attr;
392 haveLPAE = otlb->haveLPAE;
393 directToStage2 = otlb->directToStage2;
394 stage2Req = otlb->stage2Req;
395
396 /* Sync the stage2 MMU if they exist in both
397 * the old CPU and the new
398 */
399 if (!isStage2 &&
400 stage2Tlb && otlb->stage2Tlb) {
401 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
402 }
403 } else {
404 panic("Incompatible TLB type!");
405 }
406}
407
408void
409TLB::serialize(CheckpointOut &cp) const
410{
411 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
412
413 SERIALIZE_SCALAR(_attr);
414 SERIALIZE_SCALAR(haveLPAE);
415 SERIALIZE_SCALAR(directToStage2);
416 SERIALIZE_SCALAR(stage2Req);
417
418 int num_entries = size;
419 SERIALIZE_SCALAR(num_entries);
420 for (int i = 0; i < size; i++)
421 table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
422}
423
424void
425TLB::unserialize(CheckpointIn &cp)
426{
427 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
428
429 UNSERIALIZE_SCALAR(_attr);
430 UNSERIALIZE_SCALAR(haveLPAE);
431 UNSERIALIZE_SCALAR(directToStage2);
432 UNSERIALIZE_SCALAR(stage2Req);
433
434 int num_entries;
435 UNSERIALIZE_SCALAR(num_entries);
436 for (int i = 0; i < min(size, num_entries); i++)
437 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
438}
439
440void
441TLB::regStats()
442{
443 BaseTLB::regStats();
444 instHits
445 .name(name() + ".inst_hits")
446 .desc("ITB inst hits")
447 ;
448
449 instMisses
450 .name(name() + ".inst_misses")
451 .desc("ITB inst misses")
452 ;
453
454 instAccesses
455 .name(name() + ".inst_accesses")
456 .desc("ITB inst accesses")
457 ;
458
459 readHits
460 .name(name() + ".read_hits")
461 .desc("DTB read hits")
462 ;
463
464 readMisses
465 .name(name() + ".read_misses")
466 .desc("DTB read misses")
467 ;
468
469 readAccesses
470 .name(name() + ".read_accesses")
471 .desc("DTB read accesses")
472 ;
473
474 writeHits
475 .name(name() + ".write_hits")
476 .desc("DTB write hits")
477 ;
478
479 writeMisses
480 .name(name() + ".write_misses")
481 .desc("DTB write misses")
482 ;
483
484 writeAccesses
485 .name(name() + ".write_accesses")
486 .desc("DTB write accesses")
487 ;
488
489 hits
490 .name(name() + ".hits")
491 .desc("DTB hits")
492 ;
493
494 misses
495 .name(name() + ".misses")
496 .desc("DTB misses")
497 ;
498
499 accesses
500 .name(name() + ".accesses")
501 .desc("DTB accesses")
502 ;
503
504 flushTlb
505 .name(name() + ".flush_tlb")
506 .desc("Number of times complete TLB was flushed")
507 ;
508
509 flushTlbMva
510 .name(name() + ".flush_tlb_mva")
511 .desc("Number of times TLB was flushed by MVA")
512 ;
513
514 flushTlbMvaAsid
515 .name(name() + ".flush_tlb_mva_asid")
516 .desc("Number of times TLB was flushed by MVA & ASID")
517 ;
518
519 flushTlbAsid
520 .name(name() + ".flush_tlb_asid")
521 .desc("Number of times TLB was flushed by ASID")
522 ;
523
524 flushedEntries
525 .name(name() + ".flush_entries")
526 .desc("Number of entries that have been flushed from TLB")
527 ;
528
529 alignFaults
530 .name(name() + ".align_faults")
531 .desc("Number of TLB faults due to alignment restrictions")
532 ;
533
534 prefetchFaults
535 .name(name() + ".prefetch_faults")
536 .desc("Number of TLB faults due to prefetch")
537 ;
538
539 domainFaults
540 .name(name() + ".domain_faults")
541 .desc("Number of TLB faults due to domain restrictions")
542 ;
543
544 permsFaults
545 .name(name() + ".perms_faults")
546 .desc("Number of TLB faults due to permissions restrictions")
547 ;
548
549 instAccesses = instHits + instMisses;
550 readAccesses = readHits + readMisses;
551 writeAccesses = writeHits + writeMisses;
552 hits = readHits + writeHits + instHits;
553 misses = readMisses + writeMisses + instMisses;
554 accesses = readAccesses + writeAccesses + instAccesses;
555}
556
557void
558TLB::regProbePoints()
559{
560 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
561}
562
563Fault
564TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
565 Translation *translation, bool &delay, bool timing)
566{
567 updateMiscReg(tc);
568 Addr vaddr_tainted = req->getVaddr();
569 Addr vaddr = 0;
570 if (aarch64)
571 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
572 else
573 vaddr = vaddr_tainted;
574 Request::Flags flags = req->getFlags();
575
576 bool is_fetch = (mode == Execute);
577 bool is_write = (mode == Write);
578
579 if (!is_fetch) {
580 assert(flags & MustBeOne);
581 if (sctlr.a || !(flags & AllowUnaligned)) {
582 if (vaddr & mask(flags & AlignmentMask)) {
583 // LPAE is always disabled in SE mode
584 return std::make_shared<DataAbort>(
585 vaddr_tainted,
586 TlbEntry::DomainType::NoAccess, is_write,
587 ArmFault::AlignmentFault, isStage2,
588 ArmFault::VmsaTran);
589 }
590 }
591 }
592
593 Addr paddr;
594 Process *p = tc->getProcessPtr();
595
596 if (!p->pTable->translate(vaddr, paddr))
597 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
598 req->setPaddr(paddr);
599
600 return finalizePhysical(req, tc, mode);
601}
602
603Fault
604TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
605{
606 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
607 Request::Flags flags = req->getFlags();
608 bool is_fetch = (mode == Execute);
609 bool is_write = (mode == Write);
610 bool is_priv = isPriv && !(flags & UserMode);
611
612 // Get the translation type from the actuall table entry
613 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
614 : ArmFault::VmsaTran;
615
616 // If this is the second stage of translation and the request is for a
617 // stage 1 page table walk then we need to check the HCR.PTW bit. This
618 // allows us to generate a fault if the request targets an area marked
619 // as a device or strongly ordered.
620 if (isStage2 && req->isPTWalk() && hcr.ptw &&
621 (te->mtype != TlbEntry::MemoryType::Normal)) {
622 return std::make_shared<DataAbort>(
623 vaddr, te->domain, is_write,
624 ArmFault::PermissionLL + te->lookupLevel,
625 isStage2, tranMethod);
626 }
627
628 // Generate an alignment fault for unaligned data accesses to device or
629 // strongly ordered memory
630 if (!is_fetch) {
631 if (te->mtype != TlbEntry::MemoryType::Normal) {
632 if (vaddr & mask(flags & AlignmentMask)) {
633 alignFaults++;
634 return std::make_shared<DataAbort>(
635 vaddr, TlbEntry::DomainType::NoAccess, is_write,
636 ArmFault::AlignmentFault, isStage2,
637 tranMethod);
638 }
639 }
640 }
641
642 if (te->nonCacheable) {
643 // Prevent prefetching from I/O devices.
644 if (req->isPrefetch()) {
645 // Here we can safely use the fault status for the short
646 // desc. format in all cases
647 return std::make_shared<PrefetchAbort>(
648 vaddr, ArmFault::PrefetchUncacheable,
649 isStage2, tranMethod);
650 }
651 }
652
653 if (!te->longDescFormat) {
654 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
655 case 0:
656 domainFaults++;
657 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
658 " domain: %#x write:%d\n", dacr,
659 static_cast<uint8_t>(te->domain), is_write);
660 if (is_fetch) {
661 // Use PC value instead of vaddr because vaddr might
662 // be aligned to cache line and should not be the
663 // address reported in FAR
664 return std::make_shared<PrefetchAbort>(
665 req->getPC(),
666 ArmFault::DomainLL + te->lookupLevel,
667 isStage2, tranMethod);
668 } else
669 return std::make_shared<DataAbort>(
670 vaddr, te->domain, is_write,
671 ArmFault::DomainLL + te->lookupLevel,
672 isStage2, tranMethod);
673 case 1:
674 // Continue with permissions check
675 break;
676 case 2:
677 panic("UNPRED domain\n");
678 case 3:
679 return NoFault;
680 }
681 }
682
683 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
684 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
685 uint8_t hap = te->hap;
686
687 if (sctlr.afe == 1 || te->longDescFormat)
688 ap |= 1;
689
690 bool abt;
691 bool isWritable = true;
692 // If this is a stage 2 access (eg for reading stage 1 page table entries)
693 // then don't perform the AP permissions check, we stil do the HAP check
694 // below.
695 if (isStage2) {
696 abt = false;
697 } else {
698 switch (ap) {
699 case 0:
700 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
701 (int)sctlr.rs);
702 if (!sctlr.xp) {
703 switch ((int)sctlr.rs) {
704 case 2:
705 abt = is_write;
706 break;
707 case 1:
708 abt = is_write || !is_priv;
709 break;
710 case 0:
711 case 3:
712 default:
713 abt = true;
714 break;
715 }
716 } else {
717 abt = true;
718 }
719 break;
720 case 1:
721 abt = !is_priv;
722 break;
723 case 2:
724 abt = !is_priv && is_write;
725 isWritable = is_priv;
726 break;
727 case 3:
728 abt = false;
729 break;
730 case 4:
731 panic("UNPRED premissions\n");
732 case 5:
733 abt = !is_priv || is_write;
734 isWritable = false;
735 break;
736 case 6:
737 case 7:
738 abt = is_write;
739 isWritable = false;
740 break;
741 default:
742 panic("Unknown permissions %#x\n", ap);
743 }
744 }
745
746 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
747 bool xn = te->xn || (isWritable && sctlr.wxn) ||
748 (ap == 3 && sctlr.uwxn && is_priv);
749 if (is_fetch && (abt || xn ||
750 (te->longDescFormat && te->pxn && is_priv) ||
751 (isSecure && te->ns && scr.sif))) {
752 permsFaults++;
753 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
754 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
755 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
756 // Use PC value instead of vaddr because vaddr might be aligned to
757 // cache line and should not be the address reported in FAR
758 return std::make_shared<PrefetchAbort>(
759 req->getPC(),
760 ArmFault::PermissionLL + te->lookupLevel,
761 isStage2, tranMethod);
762 } else if (abt | hapAbt) {
763 permsFaults++;
764 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
765 " write:%d\n", ap, is_priv, is_write);
766 return std::make_shared<DataAbort>(
767 vaddr, te->domain, is_write,
768 ArmFault::PermissionLL + te->lookupLevel,
769 isStage2 | !abt, tranMethod);
770 }
771 return NoFault;
772}
773
774
775Fault
776TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
777 ThreadContext *tc)
778{
779 assert(aarch64);
780
781 Addr vaddr_tainted = req->getVaddr();
782 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
783
784 Request::Flags flags = req->getFlags();
785 bool is_fetch = (mode == Execute);
786 bool is_write = (mode == Write);
787 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
788
789 updateMiscReg(tc, curTranType);
790
791 // If this is the second stage of translation and the request is for a
792 // stage 1 page table walk then we need to check the HCR.PTW bit. This
793 // allows us to generate a fault if the request targets an area marked
794 // as a device or strongly ordered.
795 if (isStage2 && req->isPTWalk() && hcr.ptw &&
796 (te->mtype != TlbEntry::MemoryType::Normal)) {
797 return std::make_shared<DataAbort>(
798 vaddr_tainted, te->domain, is_write,
799 ArmFault::PermissionLL + te->lookupLevel,
800 isStage2, ArmFault::LpaeTran);
801 }
802
803 // Generate an alignment fault for unaligned accesses to device or
804 // strongly ordered memory
805 if (!is_fetch) {
806 if (te->mtype != TlbEntry::MemoryType::Normal) {
807 if (vaddr & mask(flags & AlignmentMask)) {
808 alignFaults++;
809 return std::make_shared<DataAbort>(
810 vaddr_tainted,
811 TlbEntry::DomainType::NoAccess, is_write,
812 ArmFault::AlignmentFault, isStage2,
813 ArmFault::LpaeTran);
814 }
815 }
816 }
817
818 if (te->nonCacheable) {
819 // Prevent prefetching from I/O devices.
820 if (req->isPrefetch()) {
821 // Here we can safely use the fault status for the short
822 // desc. format in all cases
823 return std::make_shared<PrefetchAbort>(
824 vaddr_tainted,
825 ArmFault::PrefetchUncacheable,
826 isStage2, ArmFault::LpaeTran);
827 }
828 }
829
830 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
831 bool grant = false;
832
833 uint8_t xn = te->xn;
834 uint8_t pxn = te->pxn;
835 bool r = !is_write && !is_fetch;
836 bool w = is_write;
837 bool x = is_fetch;
838 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
839 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
840
841 if (isStage2) {
842 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
843 // In stage 2 we use the hypervisor access permission bits.
844 // The following permissions are described in ARM DDI 0487A.f
845 // D4-1802
846 uint8_t hap = 0x3 & te->hap;
847 if (is_fetch) {
848 // sctlr.wxn overrides the xn bit
849 grant = !sctlr.wxn && !xn;
850 } else if (is_write) {
851 grant = hap & 0x2;
852 } else { // is_read
853 grant = hap & 0x1;
854 }
855 } else {
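        // Stage 1 permission check: the switches below index on the 2-bit AP
        // field combined with the XN/PXN bits, following the AArch64 VMSA
        // access permission rules.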
856 switch (aarch64EL) {
857 case EL0:
858 {
859 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
860 switch (perm) {
861 case 0:
862 case 1:
863 case 8:
864 case 9:
865 grant = x;
866 break;
867 case 4:
868 case 5:
869 grant = r || w || (x && !sctlr.wxn);
870 break;
871 case 6:
872 case 7:
873 grant = r || w;
874 break;
875 case 12:
876 case 13:
877 grant = r || x;
878 break;
879 case 14:
880 case 15:
881 grant = r;
882 break;
883 default:
884 grant = false;
885 }
886 }
887 break;
888 case EL1:
889 {
890 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
891 switch (perm) {
892 case 0:
893 case 2:
894 grant = r || w || (x && !sctlr.wxn);
895 break;
896 case 1:
897 case 3:
898 case 4:
899 case 5:
900 case 6:
901 case 7:
902 // regions that are writeable at EL0 should not be
903 // executable at EL1
904 grant = r || w;
905 break;
906 case 8:
907 case 10:
908 case 12:
909 case 14:
910 grant = r || x;
911 break;
912 case 9:
913 case 11:
914 case 13:
915 case 15:
916 grant = r;
917 break;
918 default:
919 grant = false;
920 }
921 }
922 break;
923 case EL2:
924 case EL3:
925 {
926 uint8_t perm = (ap & 0x2) | xn;
927 switch (perm) {
928 case 0:
929 grant = r || w || (x && !sctlr.wxn) ;
930 break;
931 case 1:
932 grant = r || w;
933 break;
934 case 2:
935 grant = r || x;
936 break;
937 case 3:
938 grant = r;
939 break;
940 default:
941 grant = false;
942 }
943 }
944 break;
945 }
946 }
947
948 if (!grant) {
949 if (is_fetch) {
950 permsFaults++;
951 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
952 "AP:%d priv:%d write:%d ns:%d sif:%d "
953 "sctlr.afe: %d\n",
954 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
955 // Use PC value instead of vaddr because vaddr might be aligned to
956 // cache line and should not be the address reported in FAR
957 return std::make_shared<PrefetchAbort>(
958 req->getPC(),
959 ArmFault::PermissionLL + te->lookupLevel,
960 isStage2, ArmFault::LpaeTran);
961 } else {
962 permsFaults++;
963 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
964 "priv:%d write:%d\n", ap, is_priv, is_write);
965 return std::make_shared<DataAbort>(
966 vaddr_tainted, te->domain, is_write,
967 ArmFault::PermissionLL + te->lookupLevel,
968 isStage2, ArmFault::LpaeTran);
969 }
970 }
971
972 return NoFault;
973}
974
975Fault
976TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
977 Translation *translation, bool &delay, bool timing,
978 TLB::ArmTranslationType tranType, bool functional)
979{
980 // No such thing as a functional timing access
981 assert(!(timing && functional));
982
983 updateMiscReg(tc, tranType);
984
985 Addr vaddr_tainted = req->getVaddr();
986 Addr vaddr = 0;
987 if (aarch64)
988 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
989 else
990 vaddr = vaddr_tainted;
991 Request::Flags flags = req->getFlags();
992
993 bool is_fetch = (mode == Execute);
994 bool is_write = (mode == Write);
995 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
996 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
997 : ArmFault::VmsaTran;
998
999 req->setAsid(asid);
1000
1001 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1002 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1003
1004 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1005 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1006 scr, sctlr, flags, tranType);
1007
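    // If the relevant cache enable is clear (SCTLR.I for fetches, SCTLR.C for
    // data accesses) the access is treated as uncacheable and strictly
    // ordered; cache maintenance operations only pick up the ordering flag.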
1008 if ((req->isInstFetch() && (!sctlr.i)) ||
1009 ((!req->isInstFetch()) && (!sctlr.c))){
1010 if (!req->isCacheMaintenance()) {
1011 req->setFlags(Request::UNCACHEABLE);
1012 }
1013 req->setFlags(Request::STRICT_ORDER);
1014 }
1015 if (!is_fetch) {
1016 assert(flags & MustBeOne);
1017 if (sctlr.a || !(flags & AllowUnaligned)) {
1018 if (vaddr & mask(flags & AlignmentMask)) {
1019 alignFaults++;
1020 return std::make_shared<DataAbort>(
1021 vaddr_tainted,
1022 TlbEntry::DomainType::NoAccess, is_write,
1023 ArmFault::AlignmentFault, isStage2,
1024 tranMethod);
1025 }
1026 }
1027 }
1028
1029 // If guest MMU is off or hcr.vm=0 go straight to stage2
1030 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1031
1032 req->setPaddr(vaddr);
1033 // When the MMU is off the security attribute corresponds to the
1034 // security state of the processor
1035 if (isSecure)
1036 req->setFlags(Request::SECURE);
1037
1038 // @todo: double check this (ARM ARM issue C B3.2.1)
1039 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1040 nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1041 if (!req->isCacheMaintenance()) {
1042 req->setFlags(Request::UNCACHEABLE);
1043 }
1044 req->setFlags(Request::STRICT_ORDER);
1045 }
1046
1047 // Set memory attributes
1048 TlbEntry temp_te;
1049 temp_te.ns = !isSecure;
1050 if (isStage2 || hcr.dc == 0 || isSecure ||
1051 (isHyp && !(tranType & S1CTran))) {
1052
1053 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1054 : TlbEntry::MemoryType::StronglyOrdered;
1055 temp_te.innerAttrs = 0x0;
1056 temp_te.outerAttrs = 0x0;
1057 temp_te.shareable = true;
1058 temp_te.outerShareable = true;
1059 } else {
1060 temp_te.mtype = TlbEntry::MemoryType::Normal;
1061 temp_te.innerAttrs = 0x3;
1062 temp_te.outerAttrs = 0x3;
1063 temp_te.shareable = false;
1064 temp_te.outerShareable = false;
1065 }
1066 temp_te.setAttributes(long_desc_format);
1067 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1068 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1069 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1070 isStage2);
1071 setAttr(temp_te.attributes);
1072
1073 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1074 }
1075
1076 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1077 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1078 // Translation enabled
1079
1080 TlbEntry *te = NULL;
1081 TlbEntry mergeTe;
1082 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1083 functional, &mergeTe);
1084 // only proceed if we have a valid table entry
1085 if ((te == NULL) && (fault == NoFault)) delay = true;
1086
1087 // If we have the table entry transfer some of the attributes to the
1088 // request that triggered the translation
1089 if (te != NULL) {
1090 // Set memory attributes
1091 DPRINTF(TLBVerbose,
1092 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1093 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1094 te->shareable, te->innerAttrs, te->outerAttrs,
1095 static_cast<uint8_t>(te->mtype), isStage2);
1096 setAttr(te->attributes);
1097
1098 if (te->nonCacheable && !req->isCacheMaintenance())
1099 req->setFlags(Request::UNCACHEABLE);
1100
1101 // Require requests to be ordered if the request goes to
1102 // strongly ordered or device memory (i.e., anything other
1103 // than normal memory requires strict order).
1104 if (te->mtype != TlbEntry::MemoryType::Normal)
1105 req->setFlags(Request::STRICT_ORDER);
1106
1107 Addr pa = te->pAddr(vaddr);
1108 req->setPaddr(pa);
1109
1110 if (isSecure && !te->ns) {
1111 req->setFlags(Request::SECURE);
1112 }
1113 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1114 (te->mtype != TlbEntry::MemoryType::Normal)) {
1115 // Unaligned accesses to Device memory should always cause an
1116 // abort regardless of sctlr.a
1117 alignFaults++;
1118 return std::make_shared<DataAbort>(
1119 vaddr_tainted,
1120 TlbEntry::DomainType::NoAccess, is_write,
1121 ArmFault::AlignmentFault, isStage2,
1122 tranMethod);
1123 }
1124
1125 // Check for a trickbox generated address fault
1126 if (fault == NoFault)
1127 fault = testTranslation(req, mode, te->domain);
1128 }
1129
1130 if (fault == NoFault) {
1131 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1132 if (aarch64 && is_fetch && cpsr.il == 1) {
1133 return std::make_shared<IllegalInstSetStateFault>();
1134 }
1135
1136 // Don't try to finalize a physical address unless the
1137 // translation has completed (i.e., there is a table entry).
1138 return te ? finalizePhysical(req, tc, mode) : NoFault;
1139 } else {
1140 return fault;
1141 }
1142}
1143
1144Fault
1145TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1146 TLB::ArmTranslationType tranType)
1147{
1148 updateMiscReg(tc, tranType);
1149
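    // If stage 1 translation is being bypassed, forward the request directly
    // to the stage 2 TLB.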
1150 if (directToStage2) {
1151 assert(stage2Tlb);
1152 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1153 }
1154
1155 bool delay = false;
1156 Fault fault;
1157 if (FullSystem)
1158 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1159 else
1160 fault = translateSe(req, tc, mode, NULL, delay, false);
1161 assert(!delay);
1162 return fault;
1163}
1164
1165Fault
1166TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1167 TLB::ArmTranslationType tranType)
1168{
1169 updateMiscReg(tc, tranType);
1170
1171 if (directToStage2) {
1172 assert(stage2Tlb);
1173 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1174 }
1175
1176 bool delay = false;
1177 Fault fault;
1178 if (FullSystem)
1179 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1180 else
1181 fault = translateSe(req, tc, mode, NULL, delay, false);
1182 assert(!delay);
1183 return fault;
1184}
1185
1186void
1187TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1188    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1189{
1190    updateMiscReg(tc, tranType);
1191
1192    if (directToStage2) {
1193        assert(stage2Tlb);
1194        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1195        return;
1196    }
1197
1198    assert(translation);
1199
1200    translateComplete(req, tc, translation, mode, tranType, isStage2);
1201}
1202
1203Fault
1204TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1205 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1206 bool callFromS2)
1207{
1208 bool delay = false;
1209 Fault fault;
1210 if (FullSystem)
1211 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1212 else
1213 fault = translateSe(req, tc, mode, translation, delay, true);
1214 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1215 NoFault);
1216    // If we have a translation, and we're not in the middle of doing a stage
1217    // 2 translation, tell the translation object that we've either finished
1218    // or it's going to take a while. By not doing this when we're in the
1219    // middle of a stage 2 translation we avoid marking the translation as
1220    // delayed twice, once when the translation starts and again when the
1221    // stage 1 translation completes.
1222 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1223 if (!delay)
1224 translation->finish(fault, req, tc, mode);
1225 else
1226 translation->markDelayed();
1227 }
1228 return fault;
1229}
1230
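// Table-walk memory traffic is issued through the port owned by the stage 2
// MMU, so that port is exposed as this TLB's master port.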
1231BaseMasterPort*
1232TLB::getMasterPort()
1233{
1234 return &stage2Mmu->getPort();
1235}
1236
1237void
1238TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1239{
1240    // Check if the registers have changed, or the translation mode is
1241    // different. NOTE: the translation type doesn't affect stage 2 TLBs as
1242    // they only handle one type of translation anyway.
1243 if (miscRegValid && miscRegContext == tc->contextId() &&
1244 ((tranType == curTranType) || isStage2)) {
1245 return;
1246 }
1247
1248 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1249 cpsr = tc->readMiscReg(MISCREG_CPSR);
1250
1251 // Dependencies: SCR/SCR_EL3, CPSR
1252 isSecure = inSecureState(tc) &&
1253 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1254
1255 const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
1256 aarch64 = opModeIs64(op_mode) ||
1257 (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));
1258
1259 if (aarch64) { // AArch64
1260 // determine EL we need to translate in
1261 switch (tranType) {
1262 case S1E0Tran:
1263 case S12E0Tran:
1264 aarch64EL = EL0;
1265 break;
1266 case S1E1Tran:
1267 case S12E1Tran:
1268 aarch64EL = EL1;
1269 break;
1270 case S1E2Tran:
1271 aarch64EL = EL2;
1272 break;
1273 case S1E3Tran:
1274 aarch64EL = EL3;
1275 break;
1276 case NormalTran:
1277 case S1CTran:
1278 case S1S2NsTran:
1279 case HypMode:
1280 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1281 break;
1282 }
1283
1284 switch (aarch64EL) {
1285 case EL0:
1286 case EL1:
1287 {
1288 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1289 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1290 uint64_t ttbr_asid = ttbcr.a1 ?
1291 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1292 tc->readMiscReg(MISCREG_TTBR0_EL1);
1293 asid = bits(ttbr_asid,
1294 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1295 }
1296 break;
1297 case EL2:
1298 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1299 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1300 asid = -1;
1301 break;
1302 case EL3:
1303 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1304 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1305 asid = -1;
1306 break;
1307 }
1308 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1309 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1310 isPriv = aarch64EL != EL0;
1311 if (haveVirtualization) {
1312 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1313 isHyp = tranType & HypMode;
1314 isHyp &= (tranType & S1S2NsTran) == 0;
1315 isHyp &= (tranType & S1CTran) == 0;
1316 // Work out if we should skip the first stage of translation and go
1317 // directly to stage 2. This value is cached so we don't have to
1318 // compute it for every translation.
1319 stage2Req = isStage2 ||
1320 (hcr.vm && !isHyp && !isSecure &&
1321 !(tranType & S1CTran) && (aarch64EL < EL2) &&
1322 !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1323 directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1324 } else {
1325 vmid = 0;
1326 isHyp = false;
1327 directToStage2 = false;
1328 stage2Req = false;
1329 }
1330 } else { // AArch32
1331 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1332 !isSecure));
1333 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1334 !isSecure));
1335 scr = tc->readMiscReg(MISCREG_SCR);
1336 isPriv = cpsr.mode != MODE_USER;
1337 if (longDescFormatInUse(tc)) {
1338 uint64_t ttbr_asid = tc->readMiscReg(
1339 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1340 : MISCREG_TTBR0,
1341 tc, !isSecure));
1342 asid = bits(ttbr_asid, 55, 48);
1343 } else { // Short-descriptor translation table format in use
1344 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1345 MISCREG_CONTEXTIDR, tc,!isSecure));
1346 asid = context_id.asid;
1347 }
1348 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1349 !isSecure));
1350 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1351 !isSecure));
1352 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1353 !isSecure));
1354 hcr = tc->readMiscReg(MISCREG_HCR);
1355
1356 if (haveVirtualization) {
1357 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1358 isHyp = cpsr.mode == MODE_HYP;
1359 isHyp |= tranType & HypMode;
1360 isHyp &= (tranType & S1S2NsTran) == 0;
1361 isHyp &= (tranType & S1CTran) == 0;
1362 if (isHyp) {
1363 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1364 }
1365 // Work out if we should skip the first stage of translation and go
1366 // directly to stage 2. This value is cached so we don't have to
1367 // compute it for every translation.
1368 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1369 !(tranType & S1CTran);
1370 directToStage2 = stage2Req && !sctlr.m;
1371 } else {
1372 vmid = 0;
1373 stage2Req = false;
1374 isHyp = false;
1375 directToStage2 = false;
1376 }
1377 }
1378 miscRegValid = true;
1379 miscRegContext = tc->contextId();
1380 curTranType = tranType;
1381}
1382
1383Fault
1384TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1385 Translation *translation, bool timing, bool functional,
1386 bool is_secure, TLB::ArmTranslationType tranType)
1387{
1388 bool is_fetch = (mode == Execute);
1389 bool is_write = (mode == Write);
1390
1391 Addr vaddr_tainted = req->getVaddr();
1392 Addr vaddr = 0;
1393 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1394 if (aarch64) {
1395 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1396 } else {
1397 vaddr = vaddr_tainted;
1398 }
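    // Check for a cached entry first; on a miss (other than for a software
    // prefetch) fall back to a hardware table walk below.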
1399 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1400 if (*te == NULL) {
1401 if (req->isPrefetch()) {
1402 // if the request is a prefetch don't attempt to fill the TLB or go
1403 // any further with the memory access (here we can safely use the
1404 // fault status for the short desc. format in all cases)
1405 prefetchFaults++;
1406 return std::make_shared<PrefetchAbort>(
1407 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1408 }
1409
1410 if (is_fetch)
1411 instMisses++;
1412 else if (is_write)
1413 writeMisses++;
1414 else
1415 readMisses++;
1416
1417        // Start the translation table walk; pass variables rather than
1418        // re-retrieving them in the table walker, for speed.
1419 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1420 vaddr_tainted, asid, vmid);
1421 Fault fault;
1422 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1423 translation, timing, functional, is_secure,
1424 tranType, stage2Req);
1425        // For timing mode, return and wait for the table walk to complete.
1426 if (timing || fault != NoFault) {
1427 return fault;
1428 }
1429
1430 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1431 if (!*te)
1432 printTlb();
1433 assert(*te);
1434 } else {
1435 if (is_fetch)
1436 instHits++;
1437 else if (is_write)
1438 writeHits++;
1439 else
1440 readHits++;
1441 }
1442 return NoFault;
1443}
1444
1445Fault
1446TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1447 Translation *translation, bool timing, bool functional,
1448 TlbEntry *mergeTe)
1449{
1450 Fault fault;
1451
1452 if (isStage2) {
1453 // We are already in the stage 2 TLB. Grab the table entry for stage
1454 // 2 only. We are here because stage 1 translation is disabled.
1455 TlbEntry *s2Te = NULL;
1456 // Get the stage 2 table entry
1457 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1458 isSecure, curTranType);
1459 // Check permissions of stage 2
1460 if ((s2Te != NULL) && (fault = NoFault)) {
1461 if(aarch64)
1462 fault = checkPermissions64(s2Te, req, mode, tc);
1463 else
1464 fault = checkPermissions(s2Te, req, mode);
1465 }
1466 *te = s2Te;
1467 return fault;
1468 }
1469
1470 TlbEntry *s1Te = NULL;
1471
1472 Addr vaddr_tainted = req->getVaddr();
1473
1474 // Get the stage 1 table entry
1475 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1476 isSecure, curTranType);
1477 // only proceed if we have a valid table entry
1478 if ((s1Te != NULL) && (fault == NoFault)) {
1479 // Check stage 1 permissions before checking stage 2
1480 if (aarch64)
1481 fault = checkPermissions64(s1Te, req, mode, tc);
1482 else
1483 fault = checkPermissions(s1Te, req, mode);
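        // If a stage 2 lookup is also required, translate the stage 1 output
        // address through the stage 2 TLB and merge the attributes and
        // permissions of both stages into mergeTe.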
1484 if (stage2Req & (fault == NoFault)) {
1485 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1486 req, translation, mode, timing, functional, curTranType);
1487 fault = s2Lookup->getTe(tc, mergeTe);
1488 if (s2Lookup->isComplete()) {
1489 *te = mergeTe;
1490 // We've finished with the lookup so delete it
1491 delete s2Lookup;
1492 } else {
1493 // The lookup hasn't completed, so we can't delete it now. We
1494 // get round this by asking the object to self delete when the
1495 // translation is complete.
1496 s2Lookup->setSelfDelete();
1497 }
1498 } else {
1499 // This case deals with an S1 hit (or bypass), followed by
1500 // an S2 hit-but-perms issue
1501 if (isStage2) {
1502 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1503 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1504 if (fault != NoFault) {
1505 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1506 armFault->annotate(ArmFault::S1PTW, false);
1507 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1508 }
1509 }
1510 *te = s1Te;
1511 }
1512 }
1513 return fault;
1514}
1515
1516void
1517TLB::setTestInterface(SimObject *_ti)
1518{
1519 if (!_ti) {
1520 test = nullptr;
1521 } else {
1522 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1523 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1524 test = ti;
1525 }
1526}
1527
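// The checks below forward to an optional TLB test interface (installed via
// setTestInterface); when no tester is registered they always pass.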
1528Fault
1529TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
1530{
1531 if (!test || !req->hasSize() || req->getSize() == 0) {
1532 return NoFault;
1533 } else {
1534 return test->translationCheck(req, isPriv, mode, domain);
1535 }
1536}
1537
1538Fault
1539TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1540 TlbEntry::DomainType domain, LookupLevel lookup_level)
1541{
1542 if (!test) {
1543 return NoFault;
1544 } else {
1545 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1546 domain, lookup_level);
1547 }
1548}
1549
1550
1551ArmISA::TLB *
1552ArmTLBParams::create()
1553{
1554 return new ArmISA::TLB(this);
1555}