// tlb.cc revision 10463:25c5da51bbe0
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m)
{
    stage2Mmu = m;
    tableWalker->setMMU(m);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

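// A linear scan of the TLB. On a hit the entry is promoted towards the MRU
// (front) position, unless the access is functional, which must not perturb
// the simulated replacement state.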
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn       : 0, retval ? retval->size  : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap    : 0,
            retval ? retval->ns        : 0, retval ? retval->nstid : 0,
            retval ? retval->global    : 0, retval ? retval->asid  : 0,
            retval ? retval->el        : 0);

    return retval;
}

// Insert a new TLB entry.
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // Insert at the MRU position and evict the LRU entry at the back.
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " *  %s\n", te->print());
        ++x;
    }
}

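// Invalidate every entry that matches the requested security state (and, for
// non-secure lookups, the current VMID) at the targeted exception level.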
void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " -  %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " -  %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " -  %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

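// Look up and invalidate repeatedly until nothing matches: more than one
// entry (e.g. a secure and a non-secure copy) can match the same MVA.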
void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " -  %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

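// Entries allocated at EL0 and EL1 belong to the same (EL1&0) translation
// regime, so a flush targeting either level matches both; EL2 and EL3
// entries only match their own exception level.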
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        bootUncacheability = otlb->bootUncacheability;

        /* Sync the stage 2 TLBs if they exist in both
         * the old CPU and the new one
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(ostream &os)
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(bootUncacheability);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(bootUncacheability);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
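    // If the checkpointed TLB had more entries than this one, the surplus
    // entries are simply dropped.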
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
    }
}

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

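    // The aggregate access/hit/miss counters are formulas derived from the
    // scalar counters registered above.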
    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

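// Syscall-emulation (SE) mode translation: there is no TLB or table walk,
// just an alignment check followed by a lookup in the process's page table.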
Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return Fault(new GenericPageTableFault(vaddr_tainted));
    req->setPaddr(paddr);

    return NoFault;
}

Fault
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    return NoFault;
}

Fault
TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
        bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // AArch32, so no tagged address to purify
    uint32_t flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool is_priv   = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
                                     isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return new PrefetchAbort(vaddr,
                                         ArmFault::DomainLL + te->lookupLevel,
                                         isStage2, tranMethod);
            else
                return new DataAbort(vaddr, te->domain, is_write,
                                     ArmFault::DomainLL + te->lookupLevel,
                                     isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap  = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check, we still do the
    // HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt        = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn     = te->xn || (isWritable && sctlr.wxn) ||
                            (ap == 3    && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                     "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                     ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return new PrefetchAbort(vaddr,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
               " write:%d\n", ap, is_priv, is_write);
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2 | !abt, tranMethod);
    }
    return NoFault;
}


Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);

    uint32_t flags = req->getFlags();
    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr_tainted, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchUncacheable,
                                     isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap  = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn  = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
                        "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
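                // perm encodes AP[2:1] in bits 3:2, XN in bit 1, PXN in bit 0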
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return new PrefetchAbort(req->getPC(),
                                     ArmFault::PermissionLL + te->lookupLevel,
                                     isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return new DataAbort(vaddr_tainted, te->domain, is_write,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool &delay, bool timing,
        TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch  = (mode == Execute);
    bool is_write  = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
                 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
                 scr, sctlr, flags, tranType);

    // Generate an alignment fault for unaligned PC
    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
        return new PCAlignmentFault(req->getPC());
    }

    // If this is a clrex instruction, provide a PA of 0 with no fault.
    // This will force the monitor to set the tracked address to 0,
    // a bit of a hack, but it effectively clears this processor's monitor.
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setPaddr(0);
        req->setFlags(Request::UNCACHEABLE);
        req->setFlags(Request::CLEAR_LL);
        return NoFault;
    }
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     tranMethod);
            }
        }
    }

    // If the guest MMU is off or hcr.vm=0 go straight to stage 2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
           (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype      = is_fetch ? TlbEntry::MemoryType::Normal
                                          : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable  = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype      = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable  = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);
        if (te->nonCacheable) {
            req->setFlags(Request::UNCACHEABLE);
        }

        if (!bootUncacheability &&
            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
            req->setFlags(Request::UNCACHEABLE);
        }

        req->setPaddr(te->pAddr(vaddr));
        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return new DataAbort(vaddr_tainted,
                                 TlbEntry::DomainType::NoAccess, is_write,
                                 ArmFault::AlignmentFault, isStage2,
                                 tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);
        }
    }

    // Generate an Illegal Instruction Set State fault if the IL bit is set
    // in CPSR
    if (fault == NoFault) {
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return new IllegalInstSetStateFault();
        }
    }

    return fault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
    TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
        bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we avoid marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &tableWalker->getMasterPort("port");
}

DmaPort&
TLB::getWalkerPort()
{
    return tableWalker->getWalkerPort();
}

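// Re-read the translation-controlling system registers only when needed: the
// cached copies stay valid until miscRegValid is cleared (e.g. on
// drainResume or a CPU switch) or the translation type changes.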
void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure  = inSecureState(tc);
    isSecure &= (tranType & HypMode)    == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid           = 0;
        isHyp          = false;
        directToStage2 = false;
        stage2Req      = false;
    } else {  // AArch32
        sctlr  = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                 !isSecure));
        ttbcr  = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                 !isSecure));
        scr    = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                               !isSecure));
        hcr  = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid   = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp  = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran)    == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req      = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                             !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid           = 0;
            stage2Req      = false;
            isHyp          = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    curTranType  = tranType;
}

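// Look the address up in this TLB and, on a miss, start a table walk. In
// atomic and functional modes the walk completes inline and the entry must
// be present afterwards; in timing mode we return and the walker calls back
// into the translation when it finishes.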
Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss,
                                     isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType);
        // For timing mode, return and wait for the table walk to complete.
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

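// Combine the stage 1 result with a stage 2 lookup when two-stage
// translation is active; Stage2LookUp merges the permissions and attributes
// of both stages into mergeTe.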
Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
        Translation *translation, bool timing, bool functional,
        TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault =
                        reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}