tlb.cc (revisions 11495:1f04f97c014d and 11505:55256a05d9e9)
/*
 * Copyright (c) 2010-2013, 2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;
TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

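// Functional VA->PA probe: it only consults entries already cached in
// this TLB (the lookup is made with functional=true and no table walk is
// started), so a miss simply returns false rather than faulting.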
bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

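// Fully associative lookup. An entry matches on VA, VMID, hyp/security
// state, translation regime (target_el) and, unless ignore_asn is set,
// the ASID. Non-functional hits at positions beyond rangeMRU are promoted
// to the MRU (front) slot; functional probes leave replacement state
// untouched so debugger accesses do not perturb timing behaviour.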
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global,
                table[size-1].isHyp, table[size-1].el);

    // Insert into the MRU position, evicting the LRU entry
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

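// TLB maintenance operations. The flush* methods below implement the
// invalidate-by-{all, ASID, MVA, MVA+ASID} flavours; in gem5 they are
// reached from the ISA's TLBI/system-register handlers (assumed mapping,
// e.g. a TLBIALL write ends up in flushAllSecurity and a TLBIMVA write in
// flushMvaAsid). Each also cascades into the stage-2 TLB where relevant.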
void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

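// An entry's exception level must belong to the translation regime being
// targeted: EL2 and EL3 entries only match themselves, while EL0 and EL1
// share the EL1&0 regime and match each other. ignore_el skips the check
// entirely (used by the flush-all style maintenance operations above).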
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

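// Restoring at most min(size, num_entries) entries lets a checkpoint
// taken with a larger TLB be loaded into a smaller one; any extra
// checkpointed entries are silently dropped.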
void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

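// Syscall-emulation (SE) translation: the virtual address is mapped
// through the process page table rather than the hardware table walkers,
// but alignment is still checked so unaligned accesses fault much as they
// would in full-system mode.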
Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return NoFault;
}

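// AArch32 permission check, applied after a hit or a completed walk. In
// order it: checks HCR.PTW for stage-2 walks that hit device memory,
// raises alignment faults for unaligned device accesses, blocks
// prefetches from uncacheable regions, evaluates the DACR domain (short
// descriptors only), and finally decodes the AP/HAP access permissions
// plus the XN/WXN/UWXN/PXN execute-never controls.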
Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit: no tagged bits to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return std::make_shared<PrefetchAbort>(
                    vaddr,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check; we still do
    // the HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return std::make_shared<PrefetchAbort>(
            vaddr,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}

745
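// AArch64 permission decode. The stage-1 controls are folded into a small
// index so each regime reduces to a flat switch:
//   perm = (ap << 2) | (xn << 1) | pxn    for the EL1&0 regime
//   perm = (ap & 0x2) | xn                for EL2/EL3 (no AP[1] there)
// Worked example for EL1: ap=0b00 (privileged RW, no EL0 access), xn=0,
// pxn=0 gives perm=0, so grant = r || w || (x && !sctlr.wxn), i.e. WXN is
// the only thing that can forbid executing from such a page.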
Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

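// Full-system translation. With the MMU off (or hcr.vm clear for stage 2)
// the VA is used directly as the PA and memory attributes are synthesized
// from the current processor state; otherwise getResultTe() supplies the
// (possibly merged stage 1 + stage 2) entry and its attributes are copied
// onto the request.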
Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // Only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable)
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
        }
    }

    return fault;
}

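// The three public translate entry points below (atomic, functional,
// timing) share the same front end: refresh the cached misc regs, bounce
// straight to the stage-2 TLB when directToStage2 is set, then dispatch
// to translateFs (full system) or translateSe (syscall emulation).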
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType,
                            true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed
    // twice, once when the translation starts and again when the stage 1
    // translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &stage2Mmu->getPort();
}

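// Snapshot the system registers the translation paths depend on (CPSR
// plus the cached SCTLR/SCR/HCR/TTBCR/NMRR/PRRR members used above). The
// cached copy is reused until the context ID or translation type changes,
// or until drainResume() clears miscRegValid.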
void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // Check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);
964 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
965 scr, sctlr, flags, tranType);
966
967 if ((req->isInstFetch() && (!sctlr.i)) ||
968 ((!req->isInstFetch()) && (!sctlr.c))){
969 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
970 }
971 if (!is_fetch) {
972 assert(flags & MustBeOne);
973 if (sctlr.a || !(flags & AllowUnaligned)) {
974 if (vaddr & mask(flags & AlignmentMask)) {
975 alignFaults++;
976 return std::make_shared<DataAbort>(
977 vaddr_tainted,
978 TlbEntry::DomainType::NoAccess, is_write,
979 ArmFault::AlignmentFault, isStage2,
980 tranMethod);
981 }
982 }
983 }
984
985 // If guest MMU is off or hcr.vm=0 go straight to stage2
986 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
987
988 req->setPaddr(vaddr);
989 // When the MMU is off the security attribute corresponds to the
990 // security state of the processor
991 if (isSecure)
992 req->setFlags(Request::SECURE);
993
994 // @todo: double check this (ARM ARM issue C B3.2.1)
995 if (long_desc_format || sctlr.tre == 0) {
996 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
997 } else {
998 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
999 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
1000 }
1001
1002 // Set memory attributes
1003 TlbEntry temp_te;
1004 temp_te.ns = !isSecure;
1005 if (isStage2 || hcr.dc == 0 || isSecure ||
1006 (isHyp && !(tranType & S1CTran))) {
1007
1008 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1009 : TlbEntry::MemoryType::StronglyOrdered;
1010 temp_te.innerAttrs = 0x0;
1011 temp_te.outerAttrs = 0x0;
1012 temp_te.shareable = true;
1013 temp_te.outerShareable = true;
1014 } else {
1015 temp_te.mtype = TlbEntry::MemoryType::Normal;
1016 temp_te.innerAttrs = 0x3;
1017 temp_te.outerAttrs = 0x3;
1018 temp_te.shareable = false;
1019 temp_te.outerShareable = false;
1020 }
1021 temp_te.setAttributes(long_desc_format);
1022 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1023 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1024 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1025 isStage2);
1026 setAttr(temp_te.attributes);
1027
1028 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1029 }
1030
1031 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1032 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1033 // Translation enabled
1034
1035 TlbEntry *te = NULL;
1036 TlbEntry mergeTe;
1037 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1038 functional, &mergeTe);
1039 // only proceed if we have a valid table entry
1040 if ((te == NULL) && (fault == NoFault)) delay = true;
1041
1042 // If we have the table entry transfer some of the attributes to the
1043 // request that triggered the translation
1044 if (te != NULL) {
1045 // Set memory attributes
1046 DPRINTF(TLBVerbose,
1047 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1048 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1049 te->shareable, te->innerAttrs, te->outerAttrs,
1050 static_cast<uint8_t>(te->mtype), isStage2);
1051 setAttr(te->attributes);
1052
1053 if (te->nonCacheable)
1054 req->setFlags(Request::UNCACHEABLE);
1055
1056 // Require requests to be ordered if the request goes to
1057 // strongly ordered or device memory (i.e., anything other
1058 // than normal memory requires strict order).
1059 if (te->mtype != TlbEntry::MemoryType::Normal)
1060 req->setFlags(Request::STRICT_ORDER);
1061
1062 Addr pa = te->pAddr(vaddr);
1063 req->setPaddr(pa);
1064
1065 if (isSecure && !te->ns) {
1066 req->setFlags(Request::SECURE);
1067 }
1068 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1069 (te->mtype != TlbEntry::MemoryType::Normal)) {
1070 // Unaligned accesses to Device memory should always cause an
1071 // abort regardless of sctlr.a
1072 alignFaults++;
1073 return std::make_shared<DataAbort>(
1074 vaddr_tainted,
1075 TlbEntry::DomainType::NoAccess, is_write,
1076 ArmFault::AlignmentFault, isStage2,
1077 tranMethod);
1078 }
1079
1080 // Check for a trickbox generated address fault
1081 if (fault == NoFault)
1082 fault = testTranslation(req, mode, te->domain);
1083 }
1084
1085 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1086 if (fault == NoFault) {
1087 if (aarch64 && is_fetch && cpsr.il == 1) {
1088 return std::make_shared<IllegalInstSetStateFault>();
1089 }
1090 }
1091
1092 return fault;
1093}
1094
1095Fault
1096TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1097 TLB::ArmTranslationType tranType)
1098{
1099 updateMiscReg(tc, tranType);
1100
1101 if (directToStage2) {
1102 assert(stage2Tlb);
1103 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1104 }
1105
1106 bool delay = false;
1107 Fault fault;
1108 if (FullSystem)
1109 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1110 else
1111 fault = translateSe(req, tc, mode, NULL, delay, false);
1112 assert(!delay);
1113 return fault;
1114}
1115
1116Fault
1117TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1118 TLB::ArmTranslationType tranType)
1119{
1120 updateMiscReg(tc, tranType);
1121
1122 if (directToStage2) {
1123 assert(stage2Tlb);
1124 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1125 }
1126
1127 bool delay = false;
1128 Fault fault;
1129 if (FullSystem)
1130 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1131 else
1132 fault = translateSe(req, tc, mode, NULL, delay, false);
1133 assert(!delay);
1134 return fault;
1135}
1136
1137Fault
1138TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1139 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1140{
1141 updateMiscReg(tc, tranType);
1142
1143 if (directToStage2) {
1144 assert(stage2Tlb);
1145 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1146 }
1147
1148 assert(translation);
1149
1150 return translateComplete(req, tc, translation, mode, tranType, isStage2);
1151}
1152
1153Fault
1154TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1155 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1156 bool callFromS2)
1157{
1158 bool delay = false;
1159 Fault fault;
1160 if (FullSystem)
1161 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1162 else
1163 fault = translateSe(req, tc, mode, translation, delay, true);
1164 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1165 NoFault);
1166 // If we have a translation, and we're not in the middle of doing a stage
1167 // 2 translation tell the translation that we've either finished or its
1168 // going to take a while. By not doing this when we're in the middle of a
1169 // stage 2 translation we prevent marking the translation as delayed twice,
1170 // one when the translation starts and again when the stage 1 translation
1171 // completes.
1172 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1173 if (!delay)
1174 translation->finish(fault, req, tc, mode);
1175 else
1176 translation->markDelayed();
1177 }
1178 return fault;
1179}
1180
1181BaseMasterPort*
1182TLB::getMasterPort()
1183{
1184 return &stage2Mmu->getPort();
1185}
1186
1187void
1188TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1189{
1190 // check if the regs have changed, or the translation mode is different.
1191 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1192 // one type of translation anyway
1193 if (miscRegValid && miscRegContext == tc->contextId() &&
1194 ((tranType == curTranType) || isStage2)) {
1195 return;
1196 }
1197
1198 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1199 cpsr = tc->readMiscReg(MISCREG_CPSR);
1200
1200 // Dependencies: SCR/SCR_EL3, CPSR
1201 // Dependencies: SCR/SCR_EL3, CPSR
1201 isSecure = inSecureState(tc);
1202 isSecure &= (tranType & HypMode) == 0;
1203 isSecure &= (tranType & S1S2NsTran) == 0;
1204 aarch64 = !cpsr.width;
1202 isSecure = inSecureState(tc) &&
1203 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1204
1205 const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
1206 aarch64 = opModeIs64(op_mode) ||
1207 (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));
1208
1205 if (aarch64) { // AArch64
1206 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1207 switch (aarch64EL) {
1208 case EL0:
1209 case EL1:
1210 {
1211 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1212 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1213 uint64_t ttbr_asid = ttbcr.a1 ?
1214 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1215 tc->readMiscReg(MISCREG_TTBR0_EL1);
1216 asid = bits(ttbr_asid,
1217 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1218 }
1219 break;
1220 case EL2:
1221 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1222 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1223 asid = -1;
1224 break;
1225 case EL3:
1226 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1227 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1228 asid = -1;
1229 break;
1230 }
1231 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1232 isPriv = aarch64EL != EL0;
1233 // @todo: modify this behaviour to support Virtualization in
1234 // AArch64
1235 vmid = 0;
1236 isHyp = false;
1237 directToStage2 = false;
1238 stage2Req = false;
1239 } else { // AArch32
1240 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1241 !isSecure));
1242 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1243 !isSecure));
1244 scr = tc->readMiscReg(MISCREG_SCR);
1245 isPriv = cpsr.mode != MODE_USER;
1246 if (haveLPAE && ttbcr.eae) {
1247 // Long-descriptor translation table format in use
1248 uint64_t ttbr_asid = tc->readMiscReg(
1249 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1250 : MISCREG_TTBR0,
1251 tc, !isSecure));
1252 asid = bits(ttbr_asid, 55, 48);
1253 } else {
1254 // Short-descriptor translation table format in use
1255 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1256 MISCREG_CONTEXTIDR, tc,!isSecure));
1257 asid = context_id.asid;
1258 }
1259 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1260 !isSecure));
1261 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1262 !isSecure));
1263 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1264 !isSecure));
1265 hcr = tc->readMiscReg(MISCREG_HCR);
1266
1267 if (haveVirtualization) {
1268 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1269 isHyp = cpsr.mode == MODE_HYP;
1270 isHyp |= tranType & HypMode;
1271 isHyp &= (tranType & S1S2NsTran) == 0;
1272 isHyp &= (tranType & S1CTran) == 0;
1273 if (isHyp) {
1274 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1275 }
1276 // Work out if we should skip the first stage of translation and go
1277 // directly to stage 2. This value is cached so we don't have to
1278 // compute it for every translation.
1279 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1280 !(tranType & S1CTran);
1281 directToStage2 = stage2Req && !sctlr.m;
1282 } else {
1283 vmid = 0;
1284 stage2Req = false;
1285 isHyp = false;
1286 directToStage2 = false;
1287 }
1288 }
1289 miscRegValid = true;
1290 miscRegContext = tc->contextId();
1291 curTranType = tranType;
1292}
1293
1294Fault
1295TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1296 Translation *translation, bool timing, bool functional,
1297 bool is_secure, TLB::ArmTranslationType tranType)
1298{
1299 bool is_fetch = (mode == Execute);
1300 bool is_write = (mode == Write);
1301
1302 Addr vaddr_tainted = req->getVaddr();
1303 Addr vaddr = 0;
1304 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1305 if (aarch64) {
1306 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1307 } else {
1308 vaddr = vaddr_tainted;
1309 }
1310 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1311 if (*te == NULL) {
1312 if (req->isPrefetch()) {
1313 // if the request is a prefetch don't attempt to fill the TLB or go
1314 // any further with the memory access (here we can safely use the
1315 // fault status for the short desc. format in all cases)
1316 prefetchFaults++;
1317 return std::make_shared<PrefetchAbort>(
1318 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1319 }
1320
1321 if (is_fetch)
1322 instMisses++;
1323 else if (is_write)
1324 writeMisses++;
1325 else
1326 readMisses++;
1327
1328 // start translation table walk, pass variables rather than
1329 // re-retreaving in table walker for speed
1330 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1331 vaddr_tainted, asid, vmid);
1332 Fault fault;
1333 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1334 translation, timing, functional, is_secure,
1335 tranType);
1336 // for timing mode, return and wait for table walk,
1337 if (timing || fault != NoFault) {
1338 return fault;
1339 }
1340
1341 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1342 if (!*te)
1343 printTlb();
1344 assert(*te);
1345 } else {
1346 if (is_fetch)
1347 instHits++;
1348 else if (is_write)
1349 writeHits++;
1350 else
1351 readHits++;
1352 }
1353 return NoFault;
1354}
1355
1356Fault
1357TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1358 Translation *translation, bool timing, bool functional,
1359 TlbEntry *mergeTe)
1360{
1361 Fault fault;
1362 TlbEntry *s1Te = NULL;
1363
1364 Addr vaddr_tainted = req->getVaddr();
1365
1366 // Get the stage 1 table entry
1367 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1368 isSecure, curTranType);
1369 // only proceed if we have a valid table entry
1370 if ((s1Te != NULL) && (fault == NoFault)) {
1371 // Check stage 1 permissions before checking stage 2
1372 if (aarch64)
1373 fault = checkPermissions64(s1Te, req, mode, tc);
1374 else
1375 fault = checkPermissions(s1Te, req, mode);
1376 if (stage2Req & (fault == NoFault)) {
1377 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1378 req, translation, mode, timing, functional, curTranType);
1379 fault = s2Lookup->getTe(tc, mergeTe);
1380 if (s2Lookup->isComplete()) {
1381 *te = mergeTe;
1382 // We've finished with the lookup so delete it
1383 delete s2Lookup;
1384 } else {
1385 // The lookup hasn't completed, so we can't delete it now. We
1386 // get round this by asking the object to self delete when the
1387 // translation is complete.
1388 s2Lookup->setSelfDelete();
1389 }
1390 } else {
1391 // This case deals with an S1 hit (or bypass), followed by
1392 // an S2 hit-but-perms issue
1393 if (isStage2) {
1394 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1395 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1396 if (fault != NoFault) {
1397 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1398 armFault->annotate(ArmFault::S1PTW, false);
1399 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1400 }
1401 }
1402 *te = s1Te;
1403 }
1404 }
1405 return fault;
1406}
1407
1408void
1409TLB::setTestInterface(SimObject *_ti)
1410{
1411 if (!_ti) {
1412 test = nullptr;
1413 } else {
1414 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1415 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1416 test = ti;
1417 }
1418}
1419
1420Fault
1421TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
1422{
1423 if (!test) {
1424 return NoFault;
1425 } else {
1426 return test->translationCheck(req, isPriv, mode, domain);
1427 }
1428}
1429
1430Fault
1431TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1432 TlbEntry::DomainType domain, LookupLevel lookup_level)
1433{
1434 if (!test) {
1435 return NoFault;
1436 } else {
1437 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1438 domain, lookup_level);
1439 }
1440}
1441
1442
1443ArmISA::TLB *
1444ArmTLBParams::create()
1445{
1446 return new ArmISA::TLB(this);
1447}
1209 if (aarch64) { // AArch64
1210 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1211 switch (aarch64EL) {
1212 case EL0:
1213 case EL1:
1214 {
1215 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1216 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1217 uint64_t ttbr_asid = ttbcr.a1 ?
1218 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1219 tc->readMiscReg(MISCREG_TTBR0_EL1);
1220 asid = bits(ttbr_asid,
1221 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1222 }
1223 break;
1224 case EL2:
1225 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1226 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1227 asid = -1;
1228 break;
1229 case EL3:
1230 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1231 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1232 asid = -1;
1233 break;
1234 }
1235 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1236 isPriv = aarch64EL != EL0;
1237 // @todo: modify this behaviour to support Virtualization in
1238 // AArch64
1239 vmid = 0;
1240 isHyp = false;
1241 directToStage2 = false;
1242 stage2Req = false;
1243 } else { // AArch32
1244 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1245 !isSecure));
1246 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1247 !isSecure));
1248 scr = tc->readMiscReg(MISCREG_SCR);
1249 isPriv = cpsr.mode != MODE_USER;
1250 if (haveLPAE && ttbcr.eae) {
1251 // Long-descriptor translation table format in use
1252 uint64_t ttbr_asid = tc->readMiscReg(
1253 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1254 : MISCREG_TTBR0,
1255 tc, !isSecure));
1256 asid = bits(ttbr_asid, 55, 48);
1257 } else {
1258 // Short-descriptor translation table format in use
1259 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1260 MISCREG_CONTEXTIDR, tc,!isSecure));
1261 asid = context_id.asid;
1262 }
1263 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1264 !isSecure));
1265 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1266 !isSecure));
1267 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1268 !isSecure));
1269 hcr = tc->readMiscReg(MISCREG_HCR);
1270
1271 if (haveVirtualization) {
1272 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1273 isHyp = cpsr.mode == MODE_HYP;
1274 isHyp |= tranType & HypMode;
1275 isHyp &= (tranType & S1S2NsTran) == 0;
1276 isHyp &= (tranType & S1CTran) == 0;
1277 if (isHyp) {
1278 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1279 }
1280 // Work out if we should skip the first stage of translation and go
1281 // directly to stage 2. This value is cached so we don't have to
1282 // compute it for every translation.
1283 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1284 !(tranType & S1CTran);
1285 directToStage2 = stage2Req && !sctlr.m;
1286 } else {
1287 vmid = 0;
1288 stage2Req = false;
1289 isHyp = false;
1290 directToStage2 = false;
1291 }
1292 }
1293 miscRegValid = true;
1294 miscRegContext = tc->contextId();
1295 curTranType = tranType;
1296}
1297
1298Fault
1299TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1300 Translation *translation, bool timing, bool functional,
1301 bool is_secure, TLB::ArmTranslationType tranType)
1302{
1303 bool is_fetch = (mode == Execute);
1304 bool is_write = (mode == Write);
1305
1306 Addr vaddr_tainted = req->getVaddr();
1307 Addr vaddr = 0;
1308 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1309 if (aarch64) {
1310 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1311 } else {
1312 vaddr = vaddr_tainted;
1313 }
1314 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1315 if (*te == NULL) {
1316 if (req->isPrefetch()) {
1317 // if the request is a prefetch don't attempt to fill the TLB or go
1318 // any further with the memory access (here we can safely use the
1319 // fault status for the short desc. format in all cases)
1320 prefetchFaults++;
1321 return std::make_shared<PrefetchAbort>(
1322 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1323 }
1324
1325 if (is_fetch)
1326 instMisses++;
1327 else if (is_write)
1328 writeMisses++;
1329 else
1330 readMisses++;
1331
1332 // start translation table walk, pass variables rather than
1333 // re-retreaving in table walker for speed
1334 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1335 vaddr_tainted, asid, vmid);
1336 Fault fault;
1337 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1338 translation, timing, functional, is_secure,
1339 tranType);
1340 // for timing mode, return and wait for table walk,
1341 if (timing || fault != NoFault) {
1342 return fault;
1343 }
1344
1345 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1346 if (!*te)
1347 printTlb();
1348 assert(*te);
1349 } else {
1350 if (is_fetch)
1351 instHits++;
1352 else if (is_write)
1353 writeHits++;
1354 else
1355 readHits++;
1356 }
1357 return NoFault;
1358}
1359
1360Fault
1361TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1362 Translation *translation, bool timing, bool functional,
1363 TlbEntry *mergeTe)
1364{
1365 Fault fault;
1366 TlbEntry *s1Te = NULL;
1367
1368 Addr vaddr_tainted = req->getVaddr();
1369
1370 // Get the stage 1 table entry
1371 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1372 isSecure, curTranType);
1373 // only proceed if we have a valid table entry
1374 if ((s1Te != NULL) && (fault == NoFault)) {
1375 // Check stage 1 permissions before checking stage 2
1376 if (aarch64)
1377 fault = checkPermissions64(s1Te, req, mode, tc);
1378 else
1379 fault = checkPermissions(s1Te, req, mode);
1380 if (stage2Req & (fault == NoFault)) {
1381 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1382 req, translation, mode, timing, functional, curTranType);
1383 fault = s2Lookup->getTe(tc, mergeTe);
1384 if (s2Lookup->isComplete()) {
1385 *te = mergeTe;
1386 // We've finished with the lookup so delete it
1387 delete s2Lookup;
1388 } else {
1389 // The lookup hasn't completed, so we can't delete it now. We
1390 // get round this by asking the object to self delete when the
1391 // translation is complete.
1392 s2Lookup->setSelfDelete();
1393 }
1394 } else {
1395 // This case deals with an S1 hit (or bypass), followed by
1396 // an S2 hit-but-perms issue
1397 if (isStage2) {
1398 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1399 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1400 if (fault != NoFault) {
1401 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1402 armFault->annotate(ArmFault::S1PTW, false);
1403 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1404 }
1405 }
1406 *te = s1Te;
1407 }
1408 }
1409 return fault;
1410}
1411
1412void
1413TLB::setTestInterface(SimObject *_ti)
1414{
1415 if (!_ti) {
1416 test = nullptr;
1417 } else {
1418 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1419 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1420 test = ti;
1421 }
1422}
1423
1424Fault
1425TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
1426{
1427 if (!test) {
1428 return NoFault;
1429 } else {
1430 return test->translationCheck(req, isPriv, mode, domain);
1431 }
1432}
1433
1434Fault
1435TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1436 TlbEntry::DomainType domain, LookupLevel lookup_level)
1437{
1438 if (!test) {
1439 return NoFault;
1440 } else {
1441 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1442 domain, lookup_level);
1443 }
1444}
1445
1446
1447ArmISA::TLB *
1448ArmTLBParams::create()
1449{
1450 return new ArmISA::TLB(this);
1451}