table_walker.cc revision 10474:799c8ee4ecba
1/*
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 *          Giacomo Gabrielli
39 */
40
41#include <memory>
42
43#include "arch/arm/faults.hh"
44#include "arch/arm/stage2_mmu.hh"
45#include "arch/arm/system.hh"
46#include "arch/arm/table_walker.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "sim/system.hh"
55
56using namespace ArmISA;
57
// Construct the table walker.  The initializer list order must match
// the member declaration order in table_walker.hh; this walker may be
// either a stage-1 or a stage-2 (virtualization) walker depending on
// the is_stage2 parameter.
TableWalker::TableWalker(const Params *p)
    : MemObject(p), port(this, p->sys), drainManager(NULL),
      stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
      numSquashable(p->num_squash_per_cycle),
      doL1DescEvent(this), doL2DescEvent(this),
      doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
      doL3LongDescEvent(this),
      doProcessEvent(this)
{
    // Cached copy of SCTLR; refreshed on every walk() call.
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        // Syscall-emulation mode: no ArmSystem object exists, so fall
        // back to conservative defaults (no extensions, 32-bit PA).
        armSys = NULL;
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}
87
88TableWalker::~TableWalker()
89{
90    ;
91}
92
// Per-walk bookkeeping state.  stage2Tran starts out NULL (no stage-2
// translation in flight).  NOTE(review): l2Desc is copy-initialized
// from l1Desc, which is itself only default-initialized at this point
// -- presumably this just gives l2Desc a defined initial value; verify
// against the descriptor class before changing.
TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
{
}
96
97void
98TableWalker::completeDrain()
99{
100    if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
101        pendingQueue.empty()) {
102        setDrainState(Drainable::Drained);
103        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
104        drainManager->signalDrainDone();
105        drainManager = NULL;
106    }
107}
108
109unsigned int
110TableWalker::drain(DrainManager *dm)
111{
112    unsigned int count = port.drain(dm);
113
114    bool state_queues_not_empty = false;
115
116    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
117        if (!stateQueues[i].empty()) {
118            state_queues_not_empty = true;
119            break;
120        }
121    }
122
123    if (state_queues_not_empty || pendingQueue.size()) {
124        drainManager = dm;
125        setDrainState(Drainable::Draining);
126        DPRINTF(Drain, "TableWalker not drained\n");
127
128        // return port drain count plus the table walker itself needs to drain
129        return count + 1;
130    } else {
131        setDrainState(Drainable::Drained);
132        DPRINTF(Drain, "TableWalker free, no need to drain\n");
133
134        // table walker is drained, but its ports may still need to be drained
135        return count;
136    }
137}
138
139void
140TableWalker::drainResume()
141{
142    Drainable::drainResume();
143    if (params()->sys->isTimingMode() && currState) {
144        delete currState;
145        currState = NULL;
146    }
147}
148
149BaseMasterPort&
150TableWalker::getMasterPort(const std::string &if_name, PortID idx)
151{
152    if (if_name == "port") {
153        return port;
154    }
155    return MemObject::getMasterPort(if_name, idx);
156}
157
// Entry point for a page-table walk.  Captures all walk parameters in
// a WalkerState, reads the relevant control registers for the current
// regime, and either performs the walk immediately (atomic/functional)
// or starts/queues it (timing).  Returns NoFault, a translation fault,
// or ReExec for a re-faulted timing translation.
Fault
TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType)
{
    // A walk cannot be both functional and timing.
    assert(!(_functional && _timing));
    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should be only created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen or we'll use
        // this fault to re-execute the faulting instruction which should clean
        // up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            return std::make_shared<ReExec>();
        }
    }

    // Record the parameters of this walk in the active state.
    currState->tc = _tc;
    currState->aarch64 = opModeIs64(currOpMode(_tc));
    currState->el = currEL(_tc);
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB, all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        // Strip tag/sign-extension bits from the 64-bit VA before use.
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        // Read the system/translation control registers for the
        // exception level this walk executes at.
        switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          // @todo: uncomment this to enable Virtualization
          // case EL2:
          //   assert(haveVirtualization);
          //   currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
          //   currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
          //   break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
    } else {
        // AArch32: SCTLR/TTBCR are banked between secure and
        // non-secure worlds, so flatten the register index first.
        currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr  = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr   = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr  = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    // We only do a second stage of translation if we're not secure, or in
    // hyp mode, the second stage MMU is enabled, and this table walker
    // instance is the first stage.
    currState->doingStage2 = false;
    // @todo: for now disable this in AArch64 (HCR is not set)
    currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
                           !isStage2 && !currState->isSecure && !currState->isHyp;

    // Long-descriptor format applies to AArch64, AArch32 LPAE
    // (TTBCR.EAE set), and all hyp / stage-2 translations.
    bool long_desc_format = currState->aarch64 ||
                            (_haveLPAE && currState->ttbcr.eae) ||
                            _isHyp || isStage2;

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;
    }

    if (!currState->timing) {
        // Atomic or functional mode: perform the whole walk right now.
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    // Timing mode: walks are serialized.  If one is already in flight,
    // queue this one; otherwise start it immediately.
    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
    } else {
        pending = true;
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}
310
// Event handler that dequeues the next pending timing-mode walk and
// either starts it, or squashes it when it is no longer needed (the
// instruction was squashed, or a prior walk already filled the TLB).
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    currState = pendingQueue.front();

    // Exception level the completed translation would target in the
    // TLB: current EL for AArch64, EL1 for AArch32.
    ExceptionLevel target_el = EL0;
    if (currState->aarch64)
        target_el = currEL(currState->tc);
    else
        target_el = EL1;

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            target_el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, lets process it
        pending = true;
        pendingQueue.pop_front();
        if (currState->aarch64)
            processWalkAArch64();
        else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
            processWalkLPAE();
        else
            processWalk();
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.  Discard up to numSquashable
    // unnecessary walks per invocation of this handler.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                      currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // TLB hit from a previous walk: translate the request now
            // that we know it will work
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, target_el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }

    // if we've still got pending translations schedule more work
    nextWalk(tc);
    currState = NULL;
    completeDrain();
}
393
// Perform an AArch32 short-descriptor (non-LPAE) walk: select TTBR0 or
// TTBR1 based on TTBCR.N and the VA's top bits, compute the L1
// descriptor address, and fetch the descriptor.  Returns a fault, or
// NoFault if the fetch completed/was scheduled cleanly.
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    // TTBR0 covers the low region; when TTBCR.N is non-zero and any of
    // the VA's top N bits are set, the access falls to TTBR1 instead.
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        // TTBR1 lookups always behave as if N were 0 for the address
        // computation below.
        currState->ttbcr.n = 0;
    }

    // L1 descriptor address: table base bits from the TTBR combined
    // with VA[31-N:20] as the table index (4-byte descriptors).
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
            currState->vaddr, sizeof(uint32_t), currState->isFetch,
            currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            // Timing mode: this walk is finished; allow the next
            // queued walk to start.
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // Descriptor fetches bypass the cache when the MMU's translation
    // table walk caching is disabled (SCTLR.C == 0).
    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    // In timing mode fetchDescriptor may defer the access; in that
    // case the fault (if any) is delivered via the completion event.
    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}
486
// Perform an AArch32 long-descriptor (LPAE) walk: pick VTTBR (stage 2),
// HTTBR (hyp) or TTBR0/TTBR1, determine the starting lookup level and
// descriptor address, and fetch the first descriptor.
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    Request::Flags flag = 0;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        // VTCR.SL0 selects whether the stage-2 walk starts at L1 or L2.
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(_haveLPAE && currState->ttbcr.eae);

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite, ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            // Timing mode: this walk is over; let the next one start.
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // NOTE: this assignment clobbers the SECURE flag set above, which
    // is why it is re-applied immediately below.
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    if (currState->isSecure)
        flag.set(Request::SECURE);

    // AArch32 LPAE always uses the 4KB granule.
    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    // Completion event matching the level the walk starts at.
    Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
                                            : (Event *) &doL2LongDescEvent;

    // In timing mode fetchDescriptor may defer the access; the fault,
    // if any, is then delivered via the completion event.
    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   event, &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}
655
656unsigned
657TableWalker::adjustTableSizeAArch64(unsigned tsz)
658{
659    if (tsz < 25)
660        return 25;
661    if (tsz > 48)
662        return 48;
663    return tsz;
664}
665
666bool
667TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
668{
669    return (currPhysAddrRange != MaxPhysAddrRange &&
670            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
671}
672
673Fault
674TableWalker::processWalkAArch64()
675{
676    assert(currState->aarch64);
677
678    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
679            currState->vaddr_tainted, currState->tcr);
680
681    static const GrainSize GrainMapDefault[] =
682      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
683    static const GrainSize GrainMap_EL1_tg1[] =
684      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
685
686    // Determine TTBR, table size, granule size and phys. address range
687    Addr ttbr = 0;
688    int tsz = 0, ps = 0;
689    GrainSize tg = Grain4KB; // grain size computed from tg* field
690    bool fault = false;
691    switch (currState->el) {
692      case EL0:
693      case EL1:
694        switch (bits(currState->vaddr, 63,48)) {
695          case 0:
696            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
697            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
698            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
699            tg = GrainMapDefault[currState->tcr.tg0];
700            if (bits(currState->vaddr, 63, tsz) != 0x0 ||
701                currState->tcr.epd0)
702              fault = true;
703            break;
704          case 0xffff:
705            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
706            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
707            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
708            tg = GrainMap_EL1_tg1[currState->tcr.tg1];
709            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
710                currState->tcr.epd1)
711              fault = true;
712            break;
713          default:
714            // top two bytes must be all 0s or all 1s, else invalid addr
715            fault = true;
716        }
717        ps = currState->tcr.ips;
718        break;
719      case EL2:
720      case EL3:
721        switch(bits(currState->vaddr, 63,48)) {
722            case 0:
723                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
724                if (currState->el == EL2)
725                    ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
726                else
727                    ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
728                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
729                tg = GrainMapDefault[currState->tcr.tg0];
730                break;
731            default:
732                // invalid addr if top two bytes are not all 0s
733                fault = true;
734        }
735        ps = currState->tcr.ips;
736        break;
737    }
738
739    if (fault) {
740        Fault f;
741        if (currState->isFetch)
742            f =  std::make_shared<PrefetchAbort>(
743                currState->vaddr_tainted,
744                ArmFault::TranslationLL + L0, isStage2,
745                ArmFault::LpaeTran);
746        else
747            f = std::make_shared<DataAbort>(
748                currState->vaddr_tainted,
749                TlbEntry::DomainType::NoAccess,
750                currState->isWrite,
751                ArmFault::TranslationLL + L0,
752                isStage2, ArmFault::LpaeTran);
753
754        if (currState->timing) {
755            pending = false;
756            nextWalk(currState->tc);
757            currState = NULL;
758        } else {
759            currState->tc = NULL;
760            currState->req = NULL;
761        }
762        return f;
763
764    }
765
766    if (tg == ReservedGrain) {
767        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
768                  "DEFINED behavior takes this to mean 4KB granules\n");
769        tg = Grain4KB;
770    }
771
772    int stride = tg - 3;
773    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
774
775    // Determine starting lookup level
776    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
777    // in ARM DDI 0487A.  These table values correspond to the cascading tests
778    // to compute the lookup level and are of the form
779    // (grain_size + N*stride), for N = {1, 2, 3}.
780    // A value of 64 will never succeed and a value of 0 will always succeed.
781    {
782        struct GrainMap {
783            GrainSize grain_size;
784            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
785        };
786        static const GrainMap GM[] = {
787            { Grain4KB,  { 39, 30,  0, 0 } },
788            { Grain16KB, { 47, 36, 25, 0 } },
789            { Grain64KB, { 64, 42, 29, 0 } }
790        };
791
792        const unsigned *lookup = NULL; // points to a lookup_level_cutoff
793
794        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
795            if (tg == GM[i].grain_size) {
796                lookup = GM[i].lookup_level_cutoff;
797                break;
798            }
799        }
800        assert(lookup);
801
802        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
803            if (tsz > lookup[L]) {
804                start_lookup_level = (LookupLevel) L;
805                break;
806            }
807        }
808        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
809                 "Table walker couldn't find lookup level\n");
810    }
811
812    // Determine table base address
813    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
814    Addr base_addr = mbits(ttbr, 47, base_addr_lo);
815
816    // Determine physical address size and raise an Address Size Fault if
817    // necessary
818    int pa_range = decodePhysAddrRange64(ps);
819    // Clamp to lower limit
820    if (pa_range > physAddrRange)
821        currState->physAddrRange = physAddrRange;
822    else
823        currState->physAddrRange = pa_range;
824    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
825        DPRINTF(TLB, "Address size fault before any lookup\n");
826        Fault f;
827        if (currState->isFetch)
828            f = std::make_shared<PrefetchAbort>(
829                currState->vaddr_tainted,
830                ArmFault::AddressSizeLL + start_lookup_level,
831                isStage2,
832                ArmFault::LpaeTran);
833        else
834            f = std::make_shared<DataAbort>(
835                currState->vaddr_tainted,
836                TlbEntry::DomainType::NoAccess,
837                currState->isWrite,
838                ArmFault::AddressSizeLL + start_lookup_level,
839                isStage2,
840                ArmFault::LpaeTran);
841
842
843        if (currState->timing) {
844            pending = false;
845            nextWalk(currState->tc);
846            currState = NULL;
847        } else {
848            currState->tc = NULL;
849            currState->req = NULL;
850        }
851        return f;
852
853   }
854
855    // Determine descriptor address
856    Addr desc_addr = base_addr |
857        (bits(currState->vaddr, tsz - 1,
858              stride * (3 - start_lookup_level) + tg) << 3);
859
860    // Trickbox address check
861    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
862                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
863                        currState->isWrite, TlbEntry::DomainType::NoAccess,
864                        start_lookup_level);
865    if (f) {
866        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
867        if (currState->timing) {
868            pending = false;
869            nextWalk(currState->tc);
870            currState = NULL;
871        } else {
872            currState->tc = NULL;
873            currState->req = NULL;
874        }
875        return f;
876    }
877
878    Request::Flags flag = 0;
879    if (currState->sctlr.c == 0) {
880        flag = Request::UNCACHEABLE;
881    }
882
883    currState->longDesc.lookupLevel = start_lookup_level;
884    currState->longDesc.aarch64 = true;
885    currState->longDesc.grainSize = tg;
886
887    if (currState->timing) {
888        Event *event;
889        switch (start_lookup_level) {
890          case L0:
891            event = (Event *) &doL0LongDescEvent;
892            break;
893          case L1:
894            event = (Event *) &doL1LongDescEvent;
895            break;
896          case L2:
897            event = (Event *) &doL2LongDescEvent;
898            break;
899          case L3:
900            event = (Event *) &doL3LongDescEvent;
901            break;
902          default:
903            panic("Invalid table lookup level");
904            break;
905        }
906        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
907                       (uint8_t*) &currState->longDesc.data,
908                       currState->tc->getCpuPtr()->clockPeriod(), flag);
909        DPRINTF(TLBVerbose,
910                "Adding to walker fifo: queue size before adding: %d\n",
911                stateQueues[start_lookup_level].size());
912        stateQueues[start_lookup_level].push_back(currState);
913        currState = NULL;
914    } else if (!currState->functional) {
915        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
916                       NULL, (uint8_t*) &currState->longDesc.data,
917                       currState->tc->getCpuPtr()->clockPeriod(), flag);
918        doLongDescriptor();
919        f = currState->fault;
920    } else {
921        RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
922                                     masterId);
923        PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
924        pkt->dataStatic((uint8_t*) &currState->longDesc.data);
925        port.sendFunctional(pkt);
926        doLongDescriptor();
927        delete req;
928        delete pkt;
929        f = currState->fault;
930    }
931
932    return f;
933}
934
// Fill in the memory attributes (memory type, cacheability, shareability
// and the inner/outer attribute encodings) of a TLB entry for a
// short-descriptor (VMSA) translation, based on the TEX/C/B bits and the
// S bit of the descriptor.  When SCTLR.TRE is enabled (and the MMU is on)
// the TEX remap registers PRRR/NMRR are used instead of the architected
// TEX/C/B meanings.
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: the tc and sctlr parameters are hiding the tc and sctlr class
    // members of the same names.
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    // TEX remap disabled (TRE == 0), or TRE set but the MMU is off:
    // decode the architected TEX/C/B encodings directly.
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: inner attrs in texcb[1:0], outer in texcb[3:2];
            // a zero encoding in either field means non-cacheable.
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        // TEX remap enabled: texcb[2:0] selects a PRRR/NMRR region whose
        // fields give the memory type and inner/outer cacheability.
        assert(tc);
        // NOTE(review): the banked-register flattening uses currState->tc
        // while the reads use the tc parameter -- presumably these are the
        // same thread context; confirm against callers.
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            // Region 6 has no tr6/ir6/or6 fields in gem5's register
            // definitions; it is treated as implementation defined.
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Decode the remapped memory type (TRn field).
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            // Device memory: shareability comes from PRRR.DS1/DS0
            // depending on the descriptor's S bit.
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            // Normal memory: shareability comes from PRRR.NS1/NS0
            // depending on the descriptor's S bit.
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // For Normal memory, the inner/outer cacheability comes from the
        // NMRR IRn/ORn fields; encoding 0 means non-cacheable.
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    // false: format the attributes for the short-descriptor (non-LPAE) PAR.
    te.setAttributes(false);
}
1141
// Fill in the memory attributes of a TLB entry for a long-descriptor
// (LPAE) translation.  Stage 2 walks decode the MemAttr[3:0] field
// directly from the descriptor; stage 1 walks use the descriptor's
// AttrIndx to select a byte from MAIR0/MAIR1.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0: Strongly-ordered (MemAttr[1:0] == 0) or
            // Device memory; both are non-cacheable.
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; MemAttr[3:2] gives the outer and
            // MemAttr[1:0] the inner cacheability (1 == non-cacheable).
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the
        // byte within the chosen register.
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = flattenMiscRegNsBanked(reg, currState->tc,
                                                !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        // Decode the upper nibble: memory type and outer cacheability.
        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Decode the lower nibble: inner cacheability encoding.
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // NOTE(review): the SH decode here (outerShareable when sh == 2,
    // shareable when sh[1] set) is mirrored the other way round in
    // memAttrsAArch64 -- confirm which ordering matches the intended
    // SH[1:0] encoding.
    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    // true: format the attributes for the 64-bit (LPAE) PAR; the raw
    // attribute byte goes into bits [63:56].
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1267
1268void
1269TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1270                             uint8_t sh)
1271{
1272    DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1273
1274    // Select MAIR
1275    uint64_t mair;
1276    switch (currState->el) {
1277      case EL0:
1278      case EL1:
1279        mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1280        break;
1281      case EL2:
1282        mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1283        break;
1284      case EL3:
1285        mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1286        break;
1287      default:
1288        panic("Invalid exception level");
1289        break;
1290    }
1291
1292    // Select attributes
1293    uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1294    uint8_t attr_lo = bits(attr, 3, 0);
1295    uint8_t attr_hi = bits(attr, 7, 4);
1296
1297    // Memory type
1298    te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1299
1300    // Cacheability
1301    te.nonCacheable = false;
1302    if (te.mtype == TlbEntry::MemoryType::Device ||  // Device memory
1303        attr_hi == 0x8 ||  // Normal memory, Outer Non-cacheable
1304        attr_lo == 0x8) {  // Normal memory, Inner Non-cacheable
1305        te.nonCacheable = true;
1306    }
1307
1308    te.shareable       = sh == 2;
1309    te.outerShareable = (sh & 0x2) ? true : false;
1310    // Attributes formatted according to the 64-bit PAR
1311    te.attributes = ((uint64_t) attr << 56) |
1312        (1 << 11) |     // LPAE bit
1313        (te.ns << 9) |  // NS bit
1314        (sh << 7);
1315}
1316
1317void
1318TableWalker::doL1Descriptor()
1319{
1320    if (currState->fault != NoFault) {
1321        return;
1322    }
1323
1324    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1325            currState->vaddr_tainted, currState->l1Desc.data);
1326    TlbEntry te;
1327
1328    switch (currState->l1Desc.type()) {
1329      case L1Descriptor::Ignore:
1330      case L1Descriptor::Reserved:
1331        if (!currState->timing) {
1332            currState->tc = NULL;
1333            currState->req = NULL;
1334        }
1335        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1336        if (currState->isFetch)
1337            currState->fault =
1338                std::make_shared<PrefetchAbort>(
1339                    currState->vaddr_tainted,
1340                    ArmFault::TranslationLL + L1,
1341                    isStage2,
1342                    ArmFault::VmsaTran);
1343        else
1344            currState->fault =
1345                std::make_shared<DataAbort>(
1346                    currState->vaddr_tainted,
1347                    TlbEntry::DomainType::NoAccess,
1348                    currState->isWrite,
1349                    ArmFault::TranslationLL + L1, isStage2,
1350                    ArmFault::VmsaTran);
1351        return;
1352      case L1Descriptor::Section:
1353        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1354            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1355              * enabled if set, do l1.Desc.setAp0() instead of generating
1356              * AccessFlag0
1357              */
1358
1359            currState->fault = std::make_shared<DataAbort>(
1360                currState->vaddr_tainted,
1361                currState->l1Desc.domain(),
1362                currState->isWrite,
1363                ArmFault::AccessFlagLL + L1,
1364                isStage2,
1365                ArmFault::VmsaTran);
1366        }
1367        if (currState->l1Desc.supersection()) {
1368            panic("Haven't implemented supersections\n");
1369        }
1370        insertTableEntry(currState->l1Desc, false);
1371        return;
1372      case L1Descriptor::PageTable:
1373        {
1374            Addr l2desc_addr;
1375            l2desc_addr = currState->l1Desc.l2Addr() |
1376                (bits(currState->vaddr, 19, 12) << 2);
1377            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1378                    l2desc_addr, currState->isSecure ? "s" : "ns");
1379
1380            // Trickbox address check
1381            currState->fault = tlb->walkTrickBoxCheck(
1382                l2desc_addr, currState->isSecure, currState->vaddr,
1383                sizeof(uint32_t), currState->isFetch, currState->isWrite,
1384                currState->l1Desc.domain(), L2);
1385
1386            if (currState->fault) {
1387                if (!currState->timing) {
1388                    currState->tc = NULL;
1389                    currState->req = NULL;
1390                }
1391                return;
1392            }
1393
1394            Request::Flags flag = 0;
1395            if (currState->isSecure)
1396                flag.set(Request::SECURE);
1397
1398            bool delayed;
1399            delayed = fetchDescriptor(l2desc_addr,
1400                                      (uint8_t*)&currState->l2Desc.data,
1401                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
1402                                      &TableWalker::doL2Descriptor);
1403            if (delayed) {
1404                currState->delayed = true;
1405            }
1406
1407            return;
1408        }
1409      default:
1410        panic("A new type in a 2 bit field?\n");
1411    }
1412}
1413
1414void
1415TableWalker::doLongDescriptor()
1416{
1417    if (currState->fault != NoFault) {
1418        return;
1419    }
1420
1421    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1422            currState->longDesc.lookupLevel, currState->vaddr_tainted,
1423            currState->longDesc.data,
1424            currState->aarch64 ? "AArch64" : "long-desc.");
1425
1426    if ((currState->longDesc.type() == LongDescriptor::Block) ||
1427        (currState->longDesc.type() == LongDescriptor::Page)) {
1428        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1429                "xn: %d, ap: %d, af: %d, type: %d\n",
1430                currState->longDesc.lookupLevel,
1431                currState->longDesc.data,
1432                currState->longDesc.pxn(),
1433                currState->longDesc.xn(),
1434                currState->longDesc.ap(),
1435                currState->longDesc.af(),
1436                currState->longDesc.type());
1437    } else {
1438        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1439                currState->longDesc.lookupLevel,
1440                currState->longDesc.data,
1441                currState->longDesc.type());
1442    }
1443
1444    TlbEntry te;
1445
1446    switch (currState->longDesc.type()) {
1447      case LongDescriptor::Invalid:
1448        if (!currState->timing) {
1449            currState->tc = NULL;
1450            currState->req = NULL;
1451        }
1452
1453        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1454                currState->longDesc.lookupLevel,
1455                ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1456        if (currState->isFetch)
1457            currState->fault = std::make_shared<PrefetchAbort>(
1458                currState->vaddr_tainted,
1459                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1460                isStage2,
1461                ArmFault::LpaeTran);
1462        else
1463            currState->fault = std::make_shared<DataAbort>(
1464                currState->vaddr_tainted,
1465                TlbEntry::DomainType::NoAccess,
1466                currState->isWrite,
1467                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1468                isStage2,
1469                ArmFault::LpaeTran);
1470        return;
1471      case LongDescriptor::Block:
1472      case LongDescriptor::Page:
1473        {
1474            bool fault = false;
1475            bool aff = false;
1476            // Check for address size fault
1477            if (checkAddrSizeFaultAArch64(
1478                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1479                          currState->longDesc.offsetBits()),
1480                    currState->physAddrRange)) {
1481                fault = true;
1482                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1483                        currState->longDesc.lookupLevel);
1484            // Check for access fault
1485            } else if (currState->longDesc.af() == 0) {
1486                fault = true;
1487                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1488                        currState->longDesc.lookupLevel);
1489                aff = true;
1490            }
1491            if (fault) {
1492                if (currState->isFetch)
1493                    currState->fault = std::make_shared<PrefetchAbort>(
1494                        currState->vaddr_tainted,
1495                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1496                        currState->longDesc.lookupLevel,
1497                        isStage2,
1498                        ArmFault::LpaeTran);
1499                else
1500                    currState->fault = std::make_shared<DataAbort>(
1501                        currState->vaddr_tainted,
1502                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1503                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1504                        currState->longDesc.lookupLevel,
1505                        isStage2,
1506                        ArmFault::LpaeTran);
1507            } else {
1508                insertTableEntry(currState->longDesc, true);
1509            }
1510        }
1511        return;
1512      case LongDescriptor::Table:
1513        {
1514            // Set hierarchical permission flags
1515            currState->secureLookup = currState->secureLookup &&
1516                currState->longDesc.secureTable();
1517            currState->rwTable = currState->rwTable &&
1518                currState->longDesc.rwTable();
1519            currState->userTable = currState->userTable &&
1520                currState->longDesc.userTable();
1521            currState->xnTable = currState->xnTable ||
1522                currState->longDesc.xnTable();
1523            currState->pxnTable = currState->pxnTable ||
1524                currState->longDesc.pxnTable();
1525
1526            // Set up next level lookup
1527            Addr next_desc_addr = currState->longDesc.nextDescAddr(
1528                currState->vaddr);
1529
1530            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1531                    currState->longDesc.lookupLevel,
1532                    currState->longDesc.lookupLevel + 1,
1533                    next_desc_addr,
1534                    currState->secureLookup ? "s" : "ns");
1535
1536            // Check for address size fault
1537            if (currState->aarch64 && checkAddrSizeFaultAArch64(
1538                    next_desc_addr, currState->physAddrRange)) {
1539                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1540                        currState->longDesc.lookupLevel);
1541                if (currState->isFetch)
1542                    currState->fault = std::make_shared<PrefetchAbort>(
1543                        currState->vaddr_tainted,
1544                        ArmFault::AddressSizeLL
1545                        + currState->longDesc.lookupLevel,
1546                        isStage2,
1547                        ArmFault::LpaeTran);
1548                else
1549                    currState->fault = std::make_shared<DataAbort>(
1550                        currState->vaddr_tainted,
1551                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1552                        ArmFault::AddressSizeLL
1553                        + currState->longDesc.lookupLevel,
1554                        isStage2,
1555                        ArmFault::LpaeTran);
1556                return;
1557            }
1558
1559            // Trickbox address check
1560            currState->fault = tlb->walkTrickBoxCheck(
1561                            next_desc_addr, currState->vaddr,
1562                            currState->vaddr, sizeof(uint64_t),
1563                            currState->isFetch, currState->isWrite,
1564                            TlbEntry::DomainType::Client,
1565                            toLookupLevel(currState->longDesc.lookupLevel +1));
1566
1567            if (currState->fault) {
1568                if (!currState->timing) {
1569                    currState->tc = NULL;
1570                    currState->req = NULL;
1571                }
1572                return;
1573            }
1574
1575            Request::Flags flag = 0;
1576            if (currState->secureLookup)
1577                flag.set(Request::SECURE);
1578
1579            currState->longDesc.lookupLevel =
1580                (LookupLevel) (currState->longDesc.lookupLevel + 1);
1581            Event *event = NULL;
1582            switch (currState->longDesc.lookupLevel) {
1583              case L1:
1584                assert(currState->aarch64);
1585                event = &doL1LongDescEvent;
1586                break;
1587              case L2:
1588                event = &doL2LongDescEvent;
1589                break;
1590              case L3:
1591                event = &doL3LongDescEvent;
1592                break;
1593              default:
1594                panic("Wrong lookup level in table walk\n");
1595                break;
1596            }
1597
1598            bool delayed;
1599            delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1600                                      sizeof(uint64_t), flag, -1, event,
1601                                      &TableWalker::doLongDescriptor);
1602            if (delayed) {
1603                 currState->delayed = true;
1604            }
1605        }
1606        return;
1607      default:
1608        panic("A new type in a 2 bit field?\n");
1609    }
1610}
1611
1612void
1613TableWalker::doL2Descriptor()
1614{
1615    if (currState->fault != NoFault) {
1616        return;
1617    }
1618
1619    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1620            currState->vaddr_tainted, currState->l2Desc.data);
1621    TlbEntry te;
1622
1623    if (currState->l2Desc.invalid()) {
1624        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1625        if (!currState->timing) {
1626            currState->tc = NULL;
1627            currState->req = NULL;
1628        }
1629        if (currState->isFetch)
1630            currState->fault = std::make_shared<PrefetchAbort>(
1631                    currState->vaddr_tainted,
1632                    ArmFault::TranslationLL + L2,
1633                    isStage2,
1634                    ArmFault::VmsaTran);
1635        else
1636            currState->fault = std::make_shared<DataAbort>(
1637                currState->vaddr_tainted, currState->l1Desc.domain(),
1638                currState->isWrite, ArmFault::TranslationLL + L2,
1639                isStage2,
1640                ArmFault::VmsaTran);
1641        return;
1642    }
1643
1644    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1645        /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1646          * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1647          */
1648         DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1649                 currState->sctlr.afe, currState->l2Desc.ap());
1650
1651        currState->fault = std::make_shared<DataAbort>(
1652            currState->vaddr_tainted,
1653            TlbEntry::DomainType::NoAccess, currState->isWrite,
1654            ArmFault::AccessFlagLL + L2, isStage2,
1655            ArmFault::VmsaTran);
1656    }
1657
1658    insertTableEntry(currState->l2Desc, false);
1659}
1660
void
TableWalker::doL1DescriptorWrapper()
{
    // Timing-mode completion handler for an L1 descriptor fetch: process the
    // descriptor at the head of the L1 state queue, then either finish the
    // translation, report a fault, or park the state for the L2 lookup.
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object      data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    // The walk may be the last thing holding up a pending drain request.
    completeDrain();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        // This walk is complete; allow the next queued walk to start.
        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry should now hit.
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        // doL1Descriptor issued the L2 fetch; park the state until it lands.
        stateQueues[L2].push_back(currState);
    }
    // Ownership of the state has been transferred (queue) or released.
    currState = NULL;
}
1716
void
TableWalker::doL2DescriptorWrapper()
{
    // Timing-mode completion handler for an L2 descriptor fetch. Unlike the
    // L1 wrapper there is no further level, so the walk state is always
    // destroyed at the end of this function.
    currState = stateQueues[L2].front();
    // An L2 lookup only happens when the L1 wrapper marked the walk delayed.
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry should now hit.
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    stateQueues[L2].pop_front();
    // The walk may be the last thing holding up a pending drain request.
    completeDrain();
    // This walk is complete; allow the next queued walk to start.
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1759
void
TableWalker::doL0LongDescriptorWrapper()
{
    // Completion event callback for a level-0 long-descriptor fetch.
    doLongDescriptorWrapper(L0);
}
1765
void
TableWalker::doL1LongDescriptorWrapper()
{
    // Completion event callback for a level-1 long-descriptor fetch.
    doLongDescriptorWrapper(L1);
}
1771
void
TableWalker::doL2LongDescriptorWrapper()
{
    // Completion event callback for a level-2 long-descriptor fetch.
    doLongDescriptorWrapper(L2);
}
1777
void
TableWalker::doL3LongDescriptorWrapper()
{
    // Completion event callback for a level-3 long-descriptor fetch.
    doLongDescriptorWrapper(L3);
}
1783
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    // Timing-mode completion handler shared by all long-descriptor lookup
    // levels: process the descriptor at the head of the queue for
    // curr_lookup_level, then finish, fault, or continue to the next level.
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        // This walk is complete; allow the next queued walk to start.
        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry should now hit.
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        // doLongDescriptor has already advanced longDesc.lookupLevel and
        // issued the next fetch; park the state on that level's queue.
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    // Ownership of the state has been transferred (queue) or released.
    currState = NULL;
}
1840
1841
1842void
1843TableWalker::nextWalk(ThreadContext *tc)
1844{
1845    if (pendingQueue.size())
1846        schedule(doProcessEvent, clockEdge(Cycles(1)));
1847}
1848
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
    Request::Flags flags, int queueIndex, Event *event,
    void (TableWalker::*doDescriptor)())
{
    // Fetch a page table descriptor at descAddr into 'data'. Returns true
    // for a timing walk (the data arrives later and 'event'/'doDescriptor'
    // run on completion), false when 'data' is valid on return. A
    // queueIndex of -1 means the caller handles queueing the walk state
    // itself.
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // Issue the fetch through the stage-2 MMU; the translation
            // object signals 'event' once the descriptor data is available.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            // NOTE(review): tran->fault is read right after issuing the timed
            // read -- presumably only set if the stage-2 translation faulted
            // synchronously; confirm against Stage2MMU::readDataTimed.
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                // Park the walk state on the requested descriptor queue
                // until the completion event fires; this walker no longer
                // owns it.
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Data is already valid: process the descriptor immediately.
            (this->*doDescriptor)();
        }
    } else {
        // No stage-2 translation required: access memory directly.
        if (isTiming) {
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then immediate processing.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: build a one-off request/packet pair and read
            // synchronously without affecting timing state.
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr  pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1916
1917void
1918TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
1919{
1920    TlbEntry te;
1921
1922    // Create and fill a new page table entry
1923    te.valid          = true;
1924    te.longDescFormat = longDescriptor;
1925    te.isHyp          = currState->isHyp;
1926    te.asid           = currState->asid;
1927    te.vmid           = currState->vmid;
1928    te.N              = descriptor.offsetBits();
1929    te.vpn            = currState->vaddr >> te.N;
1930    te.size           = (1<<te.N) - 1;
1931    te.pfn            = descriptor.pfn();
1932    te.domain         = descriptor.domain();
1933    te.lookupLevel    = descriptor.lookupLevel;
1934    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
1935    te.nstid          = !currState->isSecure;
1936    te.xn             = descriptor.xn();
1937    if (currState->aarch64)
1938        te.el         = currState->el;
1939    else
1940        te.el         = 1;
1941
1942    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1943    // as global
1944    te.global         = descriptor.global(currState) || isStage2;
1945    if (longDescriptor) {
1946        LongDescriptor lDescriptor =
1947            dynamic_cast<LongDescriptor &>(descriptor);
1948
1949        te.xn |= currState->xnTable;
1950        te.pxn = currState->pxnTable || lDescriptor.pxn();
1951        if (isStage2) {
1952            // this is actually the HAP field, but its stored in the same bit
1953            // possitions as the AP field in a stage 1 translation.
1954            te.hap = lDescriptor.ap();
1955        } else {
1956           te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
1957               (currState->userTable && (descriptor.ap() & 0x1));
1958        }
1959        if (currState->aarch64)
1960            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
1961                            currState->longDesc.sh());
1962        else
1963            memAttrsLPAE(currState->tc, te, lDescriptor);
1964    } else {
1965        te.ap = descriptor.ap();
1966        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
1967                 descriptor.shareable());
1968    }
1969
1970    // Debug output
1971    DPRINTF(TLB, descriptor.dbgHeader().c_str());
1972    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
1973            te.N, te.pfn, te.size, te.global, te.valid);
1974    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
1975            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
1976            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
1977            te.nonCacheable, te.ns);
1978    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
1979            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
1980            descriptor.getRawData());
1981
1982    // Insert the entry into the TLB
1983    tlb->insert(currState->vaddr, te);
1984    if (!currState->timing) {
1985        currState->tc  = NULL;
1986        currState->req = NULL;
1987    }
1988}
1989
1990ArmISA::TableWalker *
1991ArmTableWalkerParams::create()
1992{
1993    return new ArmISA::TableWalker(this);
1994}
1995
1996LookupLevel
1997TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1998{
1999    switch (lookup_level_as_int) {
2000      case L1:
2001        return L1;
2002      case L2:
2003        return L2;
2004      case L3:
2005        return L3;
2006      default:
2007        panic("Invalid lookup level conversion");
2008    }
2009}
2010