/*
 * Copyright (c) 2010-2013, 2016-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmapped_ipr.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);

    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    if (sys)
        m5opRange = sys->m5opRange();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

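    // Physical addresses that fall in the m5op range are redirected to the
    // generic mmapped-IPR interface: bits [15:8] of the address select the
    // pseudo-instruction function and bits [7:0] the sub-function.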
    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    }

    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, ExceptionLevel target_el)
{

    TlbEntry *retval = NULL;

    // Scan the table for a match, maintaining a move-to-front ordering
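    // Entries at indices <= rangeMRU count as already recent: a hit there is
    // returned in place, while a hit further down is moved to the front so
    // that head-insertion and tail-eviction approximate LRU replacement.
    // Functional accesses never reorder the table.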
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn       : 0, retval ? retval->size  : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap    : 0,
            retval ? retval->ns        : 0, retval ? retval->nstid : 0,
            retval ? retval->global    : 0, retval ? retval->asid  : 0,
            retval ? retval->el        : 0);

    return retval;
}

// Insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // Insert at the MRU position, evicting the LRU entry at the tail
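    // Shifting every entry down is O(size), which is acceptable for the
    // small fully-associative table modelled here and keeps the replacement
    // state implicit in the entry ordering.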

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " *  %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el,
                      bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        const bool el_match = ignore_el ?
            true : te->checkELMatch(target_el);

        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) && el_match) {

            DPRINTF(TLB, " -  %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, EL1, true);
    }
}

void
TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el)
{
    bool hyp = target_el == EL2;

    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        const bool el_match = ignore_el ?
            true : te->checkELMatch(target_el);

        if (te->valid && te->nstid && te->isHyp == hyp && el_match) {

            DPRINTF(TLB, " -  %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(EL1, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            te->checkELMatch(target_el)) {

            te->valid = false;
            DPRINTF(TLB, " -  %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup,
               bool ignore_asn, ExceptionLevel target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);

    bool hyp = target_el == EL2;

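    // A single MVA can match several entries (e.g. entries differing only
    // in ASID when ignore_asn is set), so look up and invalidate repeatedly
    // until no further match is found.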
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " -  %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el);
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        stage2DescReq = otlb->stage2DescReq;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(stage2DescReq);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(stage2DescReq);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
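    // Restore at most 'size' entries; if the checkpoint was taken with a
    // larger TLB, the remaining (least recently used) entries are dropped.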
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::regStats()
{
    BaseTLB::regStats();
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

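    // The aggregate stats are formulas over the scalar counters registered
    // above, so they are recomputed automatically whenever stats are dumped.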
    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

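    // The low-order request flag bits (AlignmentMask) hold the log2 of the
    // required alignment, so mask(flags & AlignmentMask) builds an address
    // mask that is non-zero only for misaligned accesses.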
    if (!is_fetch) {
        assert(flags & MustBeOne || req->isPrefetch());
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
    // A data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // AArch32: no tagged addresses to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool is_priv   = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
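        // Short-descriptor domain check: the two DACR bits selected by the
        // entry's domain mean 0 = No access (domain fault), 1 = Client
        // (check access permissions below), 2 = Reserved (UNPREDICTABLE),
        // 3 = Manager (accesses are not permission checked).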
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
    uint8_t ap  = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check; we still do
    // the HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt        = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn     = te->xn || (isWritable && sctlr.wxn) ||
                            (ap == 3    && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                     "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                     ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
               " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}


Fault
TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    Request::Flags flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap  = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn =  te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;

    // grant_read is used for faults from an atomic instruction that
    // both reads and writes from a memory location. From an ISS point
    // of view they count as read if a read to that address would have
    // generated the fault; they count as writes otherwise
    bool grant_read = true;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
                        "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        // D4-1802
        uint8_t hap = 0x3 & te->hap;
        grant_read = hap & 0x1;
        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = grant_read;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                grant_read = ap & 0x1;
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
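                // perm packs {AP[2:1], XN, PXN} into four bits; the cases
                // below follow the stage 1 access permission encodings in
                // the ARM ARM (e.g. AP[2:1] = 0b01 grants EL0 read/write,
                // 0b11 grants EL0 read-only).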
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                if (checkPAN(tc, ap, req, mode)) {
                    grant = false;
                    grant_read = false;
                    break;
                }

                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
            if (hcr.e2h && checkPAN(tc, ap, req, mode)) {
                grant = false;
                grant_read = false;
                break;
            }
            M5_FALLTHROUGH;
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain,
                (is_atomic && !grant_read) ? false : is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

bool
TLB::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode)
{
    // The PAN bit has no effect on:
    // 1) Instruction accesses.
    // 2) Data Cache instructions other than DC ZVA
    // 3) Address translation instructions, other than ATS1E1RP and
    //    ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
    //    gem5)
    // 4) Unprivileged instructions (Unimplemented in gem5)
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    if (mmfr1.pan && cpsr.pan && (ap & 0x1) && mode != Execute &&
        (!req->isCacheMaintenance() ||
            (req->getFlags() & Request::CACHE_BLOCK_ZERO))) {
        return true;
    } else {
        return false;
    }
}

Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
        Translation *translation, bool &delay, bool timing,
        TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
                 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
                 scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne || req->isPrefetch());
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If the guest MMU is off or hcr.vm=0 go straight to stage 2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
            nmrr.or0 == 0 || prrr.tr0 != 0x2) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
           (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype      = is_fetch ? TlbEntry::MemoryType::Normal
                                          : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable  = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype      = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable  = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
                // Unaligned accesses to Device memory should always cause an
                // abort regardless of sctlr.a
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    if (fault == NoFault) {
        // Don't try to finalize a physical address unless the
        // translation has completed (i.e., there is a table entry).
        return te ? finalizePhysical(req, tc, mode) : NoFault;
    } else {
        return fault;
    }
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}
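
// Example use (a minimal sketch, not part of the simulator): given a valid
// ThreadContext "tc" and a RequestPtr "req" whose virtual address and flags
// the caller has already set up, an atomic-mode translation looks like:
//
//     Fault fault = tlb->translateAtomic(req, tc, BaseTLB::Read,
//                                        TLB::NormalTran);
//     if (fault == NoFault)
//         access_memory(req->getPaddr()); // hypothetical caller hook
//
// Timing-mode callers use translateTiming() with a Translation object
// instead, and are notified through finish() or markDelayed().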

Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
        return;
    }

    assert(translation);

    translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
        Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
        bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a
    // stage 2 translation, tell the translation that we've either finished
    // or it's going to take a while. By not doing this when we're in the
    // middle of a stage 2 translation we avoid marking the translation as
    // delayed twice: once when the translation starts and again when the
    // stage 1 translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

Port *
TLB::getTableWalkerPort()
{
    return &stage2Mmu->getDMAPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // Check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
            ((tranType == curTranType) || isStage2)) {
        return;
    }
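
    // Something relevant changed: re-read the system registers and re-derive
    // the cached translation state below. miscRegValid is cleared by
    // drainResume(), so a checkpoint restore or CPU switch always takes this
    // slow path at least once.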

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    aarch64EL = tranTypeEL(cpsr, tranType);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid           = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = aarch64EL == EL2;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran)    == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran)); // <--- FIX THIS HACK
            stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
                            (aarch64EL < EL2));
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid           = 0;
            isHyp          = false;
            directToStage2 = false;
            stage2Req      = false;
            stage2DescReq  = false;
        }
    } else {  // AArch32
        sctlr  = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                 !isSecure));
        ttbcr  = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                 !isSecure));
        scr    = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                                          MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                               !isSecure));
        hcr  = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid   = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp  = cpsr.mode == MODE_HYP;
            isHyp |=  tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran)    == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req      = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                             !(tranType & S1CTran);
            stage2DescReq  = hcr.vm && !isStage2 && !isHyp && !isSecure;
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid           = 0;
            stage2Req      = false;
            isHyp          = false;
            directToStage2 = false;
            stage2DescReq  = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType  = tranType;
}

ExceptionLevel
TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
        return EL1;

      case S1E2Tran:
        return EL2;

      case S1E3Tran:
        return EL3;

      case NormalTran:
      case S1CTran:
      case S1S2NsTran:
      case HypMode:
        return currEL(cpsr);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        bool is_secure, TLB::ArmTranslationType tranType)
{
    // In a 2-stage system, the IPA->PA translation can be started via this
    // call so make sure the miscRegs are correct.
    if (isStage2) {
        updateMiscReg(tc, tranType);
    }
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType, stage2DescReq);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
        ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        *te = s2Te;
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
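            // Stage 1 succeeded and stage 2 is required: look up the IPA in
            // the stage 2 TLB and merge the attributes and permissions of
            // both stages into mergeTe before handing the result back.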
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}

Fault
TLB::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}

Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}


ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}