table_walker.cc revision 13892:0182a0601f66
/*
 * Copyright (c) 2010, 2012-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */
#include "arch/arm/table_walker.hh"

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "dev/dma_device.hh"
#include "sim/system.hh"

using namespace ArmISA;

TableWalker::TableWalker(const Params *p)
    : ClockedObject(p),
      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
      isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p->num_squash_per_cycle),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name())
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}

TableWalker::~TableWalker()
{
    ;
}

void
TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    port = &m->getDMAPort();
    masterId = master_id;
}

void
TableWalker::init()
{
    fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
    fatal_if(!port, "Table walker must have a valid port\n");
    fatal_if(!tlb, "Table walker must have a valid TLB\n");
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        if (!isStage2) {
            return *port;
        } else {
            fatal("Cannot access table walker port through stage-two walker\n");
        }
    }
    return ClockedObject::getPort(if_name, idx);
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
    asid(0), vmid(0), isHyp(false), transState(nullptr),
    vaddr(0), vaddr_tainted(0), isWrite(false), isFetch(false), isSecure(false),
    secureLookup(false), rwTable(false), userTable(false), xnTable(false),
    pxnTable(false), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[L0].empty() && stateQueues[L1].empty() &&
        stateQueues[L2].empty() && stateQueues[L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params()->sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType,
                  bool _stage2Req)
{
    assert(!(_functional && _timing));
    ++statWalks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should only be created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves so as not to risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen, or we'll use
        // this fault to re-execute the faulting instruction, which should
        // clean up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++statSquashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
    // aarch32/translation/translation/AArch32.TranslateAddress dictates
    // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
    if (isStage2) {
        currState->el = EL1;
        currState->aarch64 = ELIs64(_tc, EL2);
    } else {
        currState->el =
            TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
        currState->aarch64 =
            ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
    }
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB; all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
        } else switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case EL2:
            assert(_haveVirtualization);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr  = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr   = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr  = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    statRequestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

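    // Note: besides AArch64, Hyp-mode and stage-2 walks, which always use the
    // long format, longDescFormatInUse() reports whether the long-descriptor
    // (LPAE) format has been enabled via TTBCR.EAE for the current security
    // state.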
    bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;

        ++statWalksLongDescriptor;
    } else {
        ++statWalksShortDescriptor;
    }

    if (!currState->timing) {
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access, restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
    } else {
        pending = true;
        pendingChange();
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}

void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            currState->el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request; let's process it
        pending = true;
        pendingQueue.pop_front();
        // Keep currState in case one of the processWalk... calls NULLs it
        WalkerState *curr_state_copy = currState;
        Fault f;
        if (currState->aarch64)
            f = processWalkAArch64();
        else if (longDescFormatInUse(currState->tc) ||
                 currState->isHyp || isStage2)
            f = processWalkLPAE();
        else
            f = processWalk();

        if (f != NoFault) {
            curr_state_copy->transState->finish(f, curr_state_copy->req,
                    curr_state_copy->tc, curr_state_copy->mode);

            delete curr_state_copy;
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed, we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;
        statSquashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                      currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            statWalkServiceTime.sample(curTick() - currState->startTime);
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, currState->el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    statWalkWaitTime.sample(curTick() - currState->startTime);

    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        currState->ttbcr.n = 0;
    }

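    // Short-descriptor L1 index, as a worked example: with TTBCR.N == 0 this
    // evaluates to TTBR0[31:14] | (VA[31:20] << 2), i.e. a 16KB L1 table of
    // 4096 four-byte entries, each describing a 1MB region.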
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = testWalk(l1desc_addr, sizeof(uint32_t),
                 TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}

Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    statWalkWaitTime.sample(curTick() - currState->startTime);

    Request::Flags flag = Request::PT_WALK;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // Work out which base address register to use; in Hyp mode we always
    // use HTTBR.
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
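        // Worked example: with TTBCR.T0SZ == 1 and TTBCR.T1SZ == 1 the TTBR0
        // region covers [0, 0x7FFFFFFF] and the TTBR1 region starts at
        // 0x80000000, so ttbr0_max == 0x7FFFFFFF and ttbr1_min == 0x80000000.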

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite, ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }
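    // Worked example for the L1 case: with tsz == 0, n == 5, so the
    // descriptor address is TTBR[39:5] | (VA[31:30] << 3), i.e. a 32-byte L1
    // table of four 8-byte entries, each covering 1GB of VA space.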

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   LongDescEventByLevel[start_lookup_level],
                                   &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

unsigned
TableWalker::adjustTableSizeAArch64(unsigned tsz)
{
    if (tsz < 25)
        return 25;
    if (tsz > 48)
        return 48;
    return tsz;
}

bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
{
    return (currPhysAddrRange != MaxPhysAddrRange &&
            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
}

Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    static const GrainSize GrainMap_tg0[] =
      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_tg1[] =
      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    statWalkWaitTime.sample(curTick() - currState->startTime);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    switch (currState->el) {
      case EL0:
      case EL1:
        if (isStage2) {
            DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];
            // ARM DDI 0487A.f D7-2148
            // The starting level of stage 2 translation depends on
            // VTCR_EL2.SL0 and VTCR_EL2.TG0
            LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
            uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
            static const LookupLevel SLL[] = {
                L2, L3, L3, __, // sl0 == 0
                L1, L2, L2, __, // sl0 == 1, etc.
                L0, L1, L1, __,
                __, __, __, __
            };
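            // For example, sl0 == 1 with a 4KB granule (tg0 == 0) indexes
            // entry 0b100 of SLL and therefore starts the stage 2 walk at L1.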
            start_lookup_level = SLL[sl_tg];
            panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                     "Cannot discern lookup level from vtcr.{sl0,tg0}");
            ps = currState->vtcr.ps;
        } else {
            switch (bits(currState->vaddr, 63,48)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                    currState->tcr.epd0)
                  fault = true;
                break;
              case 0xffff:
                DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
                tg = GrainMap_tg1[currState->tcr.tg1];
                if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                    currState->tcr.epd1)
                  fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case EL2:
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            break;

          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_tg1[currState->tcr.tg1];
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->tcr.epd1 || !currState->hcr.e2h)
              fault = true;
            break;

          default:
            // top two bytes must be all 0s or all 1s, else invalid addr
            fault = true;
        }
        ps = currState->tcr.ps;
        break;
      case EL3:
        switch(bits(currState->vaddr, 63,48)) {
            case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                break;
            default:
                // invalid addr if top two bytes are not all 0s
                fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    if (fault) {
        Fault f;
        if (currState->isFetch)
            f =  std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L0, isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::TranslationLL + L0,
                isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A.  These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
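    // For example, a 4KB granule has grain_size == 12 and stride == 9, so the
    // cutoffs are {39, 30, 0}: a region with tsz > 39 starts at L0, one with
    // 30 < tsz <= 39 starts at L1, and anything smaller starts at L2.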
    if (start_lookup_level == MAX_LOOKUP_LEVELS) {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0, 0 } },
            { Grain16KB, { 47, 36, 25, 0 } },
            { Grain64KB, { 64, 42, 29, 0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }

    int stride = tg - 3;

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);
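    // Worked example: for a 4KB granule (tg == 12, stride == 9), a 39-bit
    // region starting at L1 gives base_addr_lo == 3 + 39 - 9*2 - 12 == 12,
    // i.e. a 512-entry (4KB) starting table whose base is TTBR[47:12].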

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);
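    // Continuing the example above (4KB granule, tsz == 39, start at L1),
    // the index bits are VA[38:30], so desc_addr == base_addr | (VA[38:30] << 3).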

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
                        sizeof(uint64_t), flag, start_lookup_level,
                        LongDescEventByLevel[start_lookup_level], NULL);
    } else {
        fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                        sizeof(uint64_t), flag, -1, NULL,
                        &TableWalker::doLongDescriptor);
        f = currState->fault;
    }

    return f;
}

void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: the tc and sctlr local variables are hiding the tc and sctlr
    // class variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
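        // For example, AttrIndx == 5 selects byte 1 of MAIR1, i.e.
        // MAIR1[15:8], as the 8-bit attribute field.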
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // so as to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
                             LongDescriptor &lDescriptor)
{
    uint8_t attr;
    uint8_t attr_hi;
    uint8_t attr_lo;
    uint8_t sh = lDescriptor.sh();

    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_hi = (attr >> 2) & 0x3;
        uint8_t attr_lo =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_hi == 0) {
            te.mtype        = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_lo == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_hi == 1 ? 0 :
                              attr_hi == 2 ? 2 : 1;
            te.innerAttrs   = attr_lo == 1 ? 0 :
                              attr_lo == 2 ? 6 : 5;
            // Treat write-through memory as uncacheable; this is safe
            // but for performance reasons not optimal.
            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                (attr_lo == 1) || (attr_lo == 2);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);

        // Select MAIR
        uint64_t mair;
        switch (currState->el) {
          case EL0:
          case EL1:
            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
            break;
          case EL2:
            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
            break;
          case EL3:
            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }

        // Select attributes
        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        attr_lo = bits(attr, 3, 0);
        attr_hi = bits(attr, 7, 4);

        // Memory type
        te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;

        // Cacheability
        te.nonCacheable = false;
        if (te.mtype == TlbEntry::MemoryType::Device) {  // Device memory
            te.nonCacheable = true;
        }
        // Treat write-through memory as uncacheable; this is safe
        // but for performance reasons not optimal.
        switch (attr_hi) {
          case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
          case 0x4:         // Normal memory, Outer Non-cacheable
          case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
            te.nonCacheable = true;
        }
        switch (attr_lo) {
          case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
          case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
            warn_if(!attr_hi, "Unpredictable behavior");
            M5_FALLTHROUGH;
          case 0x4:         // Device-nGnRE memory or
                            // Normal memory, Inner Non-cacheable
          case 0x8:         // Device-nGRE memory or
                            // Normal memory, Inner Write-through non-transient
            te.nonCacheable = true;
        }

        te.shareable       = sh == 2;
        te.outerShareable = (sh & 0x2) ? true : false;
        // Attributes formatted according to the 64-bit PAR
        te.attributes = ((uint64_t) attr << 56) |
            (1 << 11) |     // LPAE bit
            (te.ns << 9) |  // NS bit
            (sh << 7);
    }
}
1437
1438void
1439TableWalker::doL1Descriptor()
1440{
1441    if (currState->fault != NoFault) {
1442        return;
1443    }
1444
1445    currState->l1Desc.data = htog(currState->l1Desc.data,
1446                                  byteOrder(currState->tc));
1447
1448    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1449            currState->vaddr_tainted, currState->l1Desc.data);
1450    TlbEntry te;
1451
1452    switch (currState->l1Desc.type()) {
1453      case L1Descriptor::Ignore:
1454      case L1Descriptor::Reserved:
1455        if (!currState->timing) {
1456            currState->tc = NULL;
1457            currState->req = NULL;
1458        }
1459        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1460        if (currState->isFetch)
1461            currState->fault =
1462                std::make_shared<PrefetchAbort>(
1463                    currState->vaddr_tainted,
1464                    ArmFault::TranslationLL + L1,
1465                    isStage2,
1466                    ArmFault::VmsaTran);
1467        else
1468            currState->fault =
1469                std::make_shared<DataAbort>(
1470                    currState->vaddr_tainted,
1471                    TlbEntry::DomainType::NoAccess,
1472                    currState->isWrite,
1473                    ArmFault::TranslationLL + L1, isStage2,
1474                    ArmFault::VmsaTran);
1475        return;
1476      case L1Descriptor::Section:
1477        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1478            /** @todo: check sctlr.ha (bit[17]); if the Hardware Access
1479              * Flag is enabled, set AP[0] via l1Desc.setAp0() instead of
1480              * generating an access flag fault.
1481              */
1482
1483            currState->fault = std::make_shared<DataAbort>(
1484                currState->vaddr_tainted,
1485                currState->l1Desc.domain(),
1486                currState->isWrite,
1487                ArmFault::AccessFlagLL + L1,
1488                isStage2,
1489                ArmFault::VmsaTran);
1490        }
1491        if (currState->l1Desc.supersection()) {
1492            panic("Haven't implemented supersections\n");
1493        }
1494        insertTableEntry(currState->l1Desc, false);
1495        return;
1496      case L1Descriptor::PageTable:
1497        {
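            // The L2 (coarse page) table base comes from the L1 descriptor;
            // VA[19:12] indexes into it and each entry is 4 bytes wide,
            // hence the shift by 2.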
1498            Addr l2desc_addr;
1499            l2desc_addr = currState->l1Desc.l2Addr() |
1500                (bits(currState->vaddr, 19, 12) << 2);
1501            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1502                    l2desc_addr, currState->isSecure ? "s" : "ns");
1503
1504            // Trickbox address check
1505            currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1506                                        currState->l1Desc.domain(), L2);
1507
1508            if (currState->fault) {
1509                if (!currState->timing) {
1510                    currState->tc = NULL;
1511                    currState->req = NULL;
1512                }
1513                return;
1514            }
1515
1516            Request::Flags flag = Request::PT_WALK;
1517            if (currState->isSecure)
1518                flag.set(Request::SECURE);
1519
1520            bool delayed;
1521            delayed = fetchDescriptor(l2desc_addr,
1522                                      (uint8_t*)&currState->l2Desc.data,
1523                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
1524                                      &TableWalker::doL2Descriptor);
1525            if (delayed) {
1526                currState->delayed = true;
1527            }
1528
1529            return;
1530        }
1531      default:
1532        panic("A new type in a 2 bit field?\n");
1533    }
1534}
1535
1536void
1537TableWalker::doLongDescriptor()
1538{
1539    if (currState->fault != NoFault) {
1540        return;
1541    }
1542
1543    currState->longDesc.data = htog(currState->longDesc.data,
1544                                    byteOrder(currState->tc));
1545
1546    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1547            currState->longDesc.lookupLevel, currState->vaddr_tainted,
1548            currState->longDesc.data,
1549            currState->aarch64 ? "AArch64" : "long-desc.");
1550
1551    if ((currState->longDesc.type() == LongDescriptor::Block) ||
1552        (currState->longDesc.type() == LongDescriptor::Page)) {
1553        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1554                "xn: %d, ap: %d, af: %d, type: %d\n",
1555                currState->longDesc.lookupLevel,
1556                currState->longDesc.data,
1557                currState->longDesc.pxn(),
1558                currState->longDesc.xn(),
1559                currState->longDesc.ap(),
1560                currState->longDesc.af(),
1561                currState->longDesc.type());
1562    } else {
1563        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1564                currState->longDesc.lookupLevel,
1565                currState->longDesc.data,
1566                currState->longDesc.type());
1567    }
1568
1569    TlbEntry te;
1570
1571    switch (currState->longDesc.type()) {
1572      case LongDescriptor::Invalid:
1573        if (!currState->timing) {
1574            currState->tc = NULL;
1575            currState->req = NULL;
1576        }
1577
1578        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1579                currState->longDesc.lookupLevel,
1580                ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1581        if (currState->isFetch)
1582            currState->fault = std::make_shared<PrefetchAbort>(
1583                currState->vaddr_tainted,
1584                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1585                isStage2,
1586                ArmFault::LpaeTran);
1587        else
1588            currState->fault = std::make_shared<DataAbort>(
1589                currState->vaddr_tainted,
1590                TlbEntry::DomainType::NoAccess,
1591                currState->isWrite,
1592                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1593                isStage2,
1594                ArmFault::LpaeTran);
1595        return;
1596      case LongDescriptor::Block:
1597      case LongDescriptor::Page:
1598        {
1599            bool fault = false;
1600            bool aff = false;
1601            // Check for address size fault
1602            if (checkAddrSizeFaultAArch64(
1603                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1604                          currState->longDesc.offsetBits()),
1605                    currState->physAddrRange)) {
1606                fault = true;
1607                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1608                        currState->longDesc.lookupLevel);
1609            // Check for access fault
1610            } else if (currState->longDesc.af() == 0) {
1611                fault = true;
1612                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1613                        currState->longDesc.lookupLevel);
1614                aff = true;
1615            }
1616            if (fault) {
1617                if (currState->isFetch)
1618                    currState->fault = std::make_shared<PrefetchAbort>(
1619                        currState->vaddr_tainted,
1620                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1621                        currState->longDesc.lookupLevel,
1622                        isStage2,
1623                        ArmFault::LpaeTran);
1624                else
1625                    currState->fault = std::make_shared<DataAbort>(
1626                        currState->vaddr_tainted,
1627                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1628                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1629                        currState->longDesc.lookupLevel,
1630                        isStage2,
1631                        ArmFault::LpaeTran);
1632            } else {
1633                insertTableEntry(currState->longDesc, true);
1634            }
1635        }
1636        return;
1637      case LongDescriptor::Table:
1638        {
1639            // Set hierarchical permission flags
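            // Table-level attributes can only tighten what later levels may
            // grant: rwTable/userTable accumulate with AND, xnTable/pxnTable
            // with OR, and the lookup stays secure only while every level
            // allows it.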
1640            currState->secureLookup = currState->secureLookup &&
1641                currState->longDesc.secureTable();
1642            currState->rwTable = currState->rwTable &&
1643                currState->longDesc.rwTable();
1644            currState->userTable = currState->userTable &&
1645                currState->longDesc.userTable();
1646            currState->xnTable = currState->xnTable ||
1647                currState->longDesc.xnTable();
1648            currState->pxnTable = currState->pxnTable ||
1649                currState->longDesc.pxnTable();
1650
1651            // Set up next level lookup
1652            Addr next_desc_addr = currState->longDesc.nextDescAddr(
1653                currState->vaddr);
1654
1655            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1656                    currState->longDesc.lookupLevel,
1657                    currState->longDesc.lookupLevel + 1,
1658                    next_desc_addr,
1659                    currState->secureLookup ? "s" : "ns");
1660
1661            // Check for address size fault
1662            if (currState->aarch64 && checkAddrSizeFaultAArch64(
1663                    next_desc_addr, currState->physAddrRange)) {
1664                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1665                        currState->longDesc.lookupLevel);
1666                if (currState->isFetch)
1667                    currState->fault = std::make_shared<PrefetchAbort>(
1668                        currState->vaddr_tainted,
1669                        ArmFault::AddressSizeLL
1670                        + currState->longDesc.lookupLevel,
1671                        isStage2,
1672                        ArmFault::LpaeTran);
1673                else
1674                    currState->fault = std::make_shared<DataAbort>(
1675                        currState->vaddr_tainted,
1676                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1677                        ArmFault::AddressSizeLL
1678                        + currState->longDesc.lookupLevel,
1679                        isStage2,
1680                        ArmFault::LpaeTran);
1681                return;
1682            }
1683
1684            // Trickbox address check
1685            currState->fault = testWalk(
1686                next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1687                toLookupLevel(currState->longDesc.lookupLevel + 1));
1688
1689            if (currState->fault) {
1690                if (!currState->timing) {
1691                    currState->tc = NULL;
1692                    currState->req = NULL;
1693                }
1694                return;
1695            }
1696
1697            Request::Flags flag = Request::PT_WALK;
1698            if (currState->secureLookup)
1699                flag.set(Request::SECURE);
1700
1701            LookupLevel L = currState->longDesc.lookupLevel =
1702                (LookupLevel) (currState->longDesc.lookupLevel + 1);
1703            Event *event = NULL;
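            // Pick the completion event for the next lookup level. Reaching
            // L1 as the *next* level implies the walk started at L0, which
            // only exists for AArch64, hence the assert below.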
1704            switch (L) {
1705              case L1:
1706                assert(currState->aarch64);
                M5_FALLTHROUGH;
1707              case L2:
1708              case L3:
1709                event = LongDescEventByLevel[L];
1710                break;
1711              default:
1712                panic("Wrong lookup level in table walk\n");
1713                break;
1714            }
1715
1716            bool delayed;
1717            delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1718                                      sizeof(uint64_t), flag, -1, event,
1719                                      &TableWalker::doLongDescriptor);
1720            if (delayed) {
1721                currState->delayed = true;
1722            }
1723        }
1724        return;
1725      default:
1726        panic("A new type in a 2 bit field?\n");
1727    }
1728}
1729
1730void
1731TableWalker::doL2Descriptor()
1732{
1733    if (currState->fault != NoFault) {
1734        return;
1735    }
1736
1737    currState->l2Desc.data = htog(currState->l2Desc.data,
1738                                  byteOrder(currState->tc));
1739
1740    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1741            currState->vaddr_tainted, currState->l2Desc.data);
1742    TlbEntry te;
1743
1744    if (currState->l2Desc.invalid()) {
1745        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1746        if (!currState->timing) {
1747            currState->tc = NULL;
1748            currState->req = NULL;
1749        }
1750        if (currState->isFetch)
1751            currState->fault = std::make_shared<PrefetchAbort>(
1752                    currState->vaddr_tainted,
1753                    ArmFault::TranslationLL + L2,
1754                    isStage2,
1755                    ArmFault::VmsaTran);
1756        else
1757            currState->fault = std::make_shared<DataAbort>(
1758                currState->vaddr_tainted, currState->l1Desc.domain(),
1759                currState->isWrite, ArmFault::TranslationLL + L2,
1760                isStage2,
1761                ArmFault::VmsaTran);
1762        return;
1763    }
1764
1765    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1766        /** @todo: check sctlr.ha (bit[17]); if the Hardware Access Flag is
1767          * enabled, set AP[0] via l2Desc.setAp0() rather than faulting.
1768          */
1769        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1770                currState->sctlr.afe, currState->l2Desc.ap());
1771
1772        currState->fault = std::make_shared<DataAbort>(
1773            currState->vaddr_tainted,
1774            TlbEntry::DomainType::NoAccess, currState->isWrite,
1775            ArmFault::AccessFlagLL + L2, isStage2,
1776            ArmFault::VmsaTran);
1777    }
1778
1779    insertTableEntry(currState->l2Desc, false);
1780}
1781
1782void
1783TableWalker::doL1DescriptorWrapper()
1784{
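    // Walk states are queued per lookup level; this completion handler
    // always services the walk at the head of the L1 queue.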
1785    currState = stateQueues[L1].front();
1786    currState->delayed = false;
1787    // if there's a stage2 translation object we don't need it any more
1788    if (currState->stage2Tran) {
1789        delete currState->stage2Tran;
1790        currState->stage2Tran = NULL;
1791    }
1792
1793
1794    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n", &currState->l1Desc.data);
1795    DPRINTF(TLBVerbose, "L1 Desc object      data: %08x\n", currState->l1Desc.data);
1796
1797    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1798    doL1Descriptor();
1799
1800    stateQueues[L1].pop_front();
1801    // Check if fault was generated
1802    if (currState->fault != NoFault) {
1803        currState->transState->finish(currState->fault, currState->req,
1804                                      currState->tc, currState->mode);
1805        statWalksShortTerminatedAtLevel[0]++;
1806
1807        pending = false;
1808        nextWalk(currState->tc);
1809
1810        currState->req = NULL;
1811        currState->tc = NULL;
1812        currState->delayed = false;
1813        delete currState;
1814    }
1815    else if (!currState->delayed) {
1816        // delay is not set so there is no L2 to do
1817        // Don't finish the translation if a stage 2 look up is underway
1818        statWalkServiceTime.sample(curTick() - currState->startTime);
1819        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1820        tlb->translateTiming(currState->req, currState->tc,
1821                             currState->transState, currState->mode);
1822        statWalksShortTerminatedAtLevel[0]++;
1823
1824        pending = false;
1825        nextWalk(currState->tc);
1826
1827        currState->req = NULL;
1828        currState->tc = NULL;
1829        currState->delayed = false;
1830        delete currState;
1831    } else {
1832        // need to do L2 descriptor
1833        stateQueues[L2].push_back(currState);
1834    }
1835    currState = NULL;
1836}
1837
1838void
1839TableWalker::doL2DescriptorWrapper()
1840{
1841    currState = stateQueues[L2].front();
1842    assert(currState->delayed);
1843    // if there's a stage2 translation object we don't need it any more
1844    if (currState->stage2Tran) {
1845        delete currState->stage2Tran;
1846        currState->stage2Tran = NULL;
1847    }
1848
1849    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1850            currState->vaddr_tainted);
1851    doL2Descriptor();
1852
1853    // Check if fault was generated
1854    if (currState->fault != NoFault) {
1855        currState->transState->finish(currState->fault, currState->req,
1856                                      currState->tc, currState->mode);
1857        statWalksShortTerminatedAtLevel[1]++;
1858    } else {
1859        statWalkServiceTime.sample(curTick() - currState->startTime);
1860        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1861        tlb->translateTiming(currState->req, currState->tc,
1862                             currState->transState, currState->mode);
1863        statWalksShortTerminatedAtLevel[1]++;
1864    }
1865
1866
1867    stateQueues[L2].pop_front();
1868    pending = false;
1869    nextWalk(currState->tc);
1870
1871    currState->req = NULL;
1872    currState->tc = NULL;
1873    currState->delayed = false;
1874
1875    delete currState;
1876    currState = NULL;
1877}
1878
1879void
1880TableWalker::doL0LongDescriptorWrapper()
1881{
1882    doLongDescriptorWrapper(L0);
1883}
1884
1885void
1886TableWalker::doL1LongDescriptorWrapper()
1887{
1888    doLongDescriptorWrapper(L1);
1889}
1890
1891void
1892TableWalker::doL2LongDescriptorWrapper()
1893{
1894    doLongDescriptorWrapper(L2);
1895}
1896
1897void
1898TableWalker::doL3LongDescriptorWrapper()
1899{
1900    doLongDescriptorWrapper(L3);
1901}
1902
1903void
1904TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1905{
1906    currState = stateQueues[curr_lookup_level].front();
1907    assert(curr_lookup_level == currState->longDesc.lookupLevel);
1908    currState->delayed = false;
1909
1910    // if there's a stage2 translation object we don't need it any more
1911    if (currState->stage2Tran) {
1912        delete currState->stage2Tran;
1913        currState->stage2Tran = NULL;
1914    }
1915
1916    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1917            currState->vaddr_tainted);
1918    doLongDescriptor();
1919
1920    stateQueues[curr_lookup_level].pop_front();
1921
1922    if (currState->fault != NoFault) {
1923        // A fault was generated
1924        currState->transState->finish(currState->fault, currState->req,
1925                                      currState->tc, currState->mode);
1926
1927        pending = false;
1928        nextWalk(currState->tc);
1929
1930        currState->req = NULL;
1931        currState->tc = NULL;
1932        currState->delayed = false;
1933        delete currState;
1934    } else if (!currState->delayed) {
1935        // No additional lookups required
1936        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1937        statWalkServiceTime.sample(curTick() - currState->startTime);
1938        tlb->translateTiming(currState->req, currState->tc,
1939                             currState->transState, currState->mode);
1940        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1941
1942        pending = false;
1943        nextWalk(currState->tc);
1944
1945        currState->req = NULL;
1946        currState->tc = NULL;
1947        currState->delayed = false;
1948        delete currState;
1949    } else {
1950        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1951            panic("Max. number of lookups already reached in table walk\n");
1952        // Need to perform additional lookups
1953        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1954    }
1955    currState = NULL;
1956}
1957
1958
1959void
1960TableWalker::nextWalk(ThreadContext *tc)
1961{
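    // If more walks are queued, process the next one on the following clock
    // edge; otherwise this walker may now complete a pending drain.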
1962    if (pendingQueue.size())
1963        schedule(doProcessEvent, clockEdge(Cycles(1)));
1964    else
1965        completeDrain();
1966}
1967
1968bool
1969TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1970    Request::Flags flags, int queueIndex, Event *event,
1971    void (TableWalker::*doDescriptor)())
1972{
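    // Returns true when the access is handled in timing mode; the caller
    // then marks the walk as delayed and the supplied event/callback is
    // responsible for resuming it once the descriptor data arrives.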
1973    bool isTiming = currState->timing;
1974
1975    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
1976            descAddr, currState->stage2Req);
1977
1978    // If this translation has a stage 2 then we know descAddr is an IPA and
1979    // needs to be translated before we can access the page table. Do that
1980    // check here.
1981    if (currState->stage2Req) {
1982        Fault fault;
1983        flags = flags | TLB::MustBeOne;
1984
1985        if (isTiming) {
1986            Stage2MMU::Stage2Translation *tran = new
1987                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1988                                             currState->vaddr);
1989            currState->stage2Tran = tran;
1990            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1991                                     flags);
1992            fault = tran->fault;
1993        } else {
1994            fault = stage2Mmu->readDataUntimed(currState->tc,
1995                currState->vaddr, descAddr, data, numBytes, flags,
1996                currState->functional);
1997        }
1998
1999        if (fault != NoFault) {
2000            currState->fault = fault;
2001        }
2002        if (isTiming) {
2003            if (queueIndex >= 0) {
2004                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2005                        stateQueues[queueIndex].size());
2006                stateQueues[queueIndex].push_back(currState);
2007                currState = NULL;
2008            }
2009        } else {
2010            (this->*doDescriptor)();
2011        }
2012    } else {
2013        if (isTiming) {
2014            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2015                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2016            if (queueIndex >= 0) {
2017                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2018                        stateQueues[queueIndex].size());
2019                stateQueues[queueIndex].push_back(currState);
2020                currState = NULL;
2021            }
2022        } else if (!currState->functional) {
2023            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2024                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2025            (this->*doDescriptor)();
2026        } else {
2027            RequestPtr req = std::make_shared<Request>(
2028                descAddr, numBytes, flags, masterId);
2029
2030            req->taskId(ContextSwitchTaskId::DMA);
2031            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
2032            pkt->dataStatic(data);
2033            port->sendFunctional(pkt);
2034            (this->*doDescriptor)();
2035            delete pkt;
2036        }
2037    }
2038    return (isTiming);
2039}
2040
2041void
2042TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2043{
2044    TlbEntry te;
2045
2046    // Create and fill a new page table entry
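    // te.N is the width of the page/block offset; vpn and size (a mask of
    // the offset bits) are derived from it below.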
2047    te.valid          = true;
2048    te.longDescFormat = longDescriptor;
2049    te.isHyp          = currState->isHyp;
2050    te.asid           = currState->asid;
2051    te.vmid           = currState->vmid;
2052    te.N              = descriptor.offsetBits();
2053    te.vpn            = currState->vaddr >> te.N;
2054    te.size           = (1<<te.N) - 1;
2055    te.pfn            = descriptor.pfn();
2056    te.domain         = descriptor.domain();
2057    te.lookupLevel    = descriptor.lookupLevel;
2058    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
2059    te.nstid          = !currState->isSecure;
2060    te.xn             = descriptor.xn();
2061    if (currState->aarch64)
2062        te.el         = currState->el;
2063    else
2064        te.el         = 1;
2065
2066    statPageSizes[pageSizeNtoStatBin(te.N)]++;
2067    statRequestOrigin[COMPLETED][currState->isFetch]++;
2068
2069    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2070    // as global
2071    te.global         = descriptor.global(currState) || isStage2;
2072    if (longDescriptor) {
2073        LongDescriptor lDescriptor =
2074            dynamic_cast<LongDescriptor &>(descriptor);
2075
2076        te.xn |= currState->xnTable;
2077        te.pxn = currState->pxnTable || lDescriptor.pxn();
2078        if (isStage2) {
2079            // This is actually the HAP field, but it is stored in the same
2080            // bit positions as the AP field in a stage 1 translation.
2081            te.hap = lDescriptor.ap();
2082        } else {
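            // Fold the accumulated APTable restrictions into the leaf AP
            // bits: the entry becomes read-only if any table level cleared
            // rwTable, and EL0-accessible only if every level left userTable
            // set.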
2083            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2084                (currState->userTable && (descriptor.ap() & 0x1));
2085        }
2086        if (currState->aarch64)
2087            memAttrsAArch64(currState->tc, te, lDescriptor);
2088        else
2089            memAttrsLPAE(currState->tc, te, lDescriptor);
2090    } else {
2091        te.ap = descriptor.ap();
2092        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2093                 descriptor.shareable());
2094    }
2095
2096    // Debug output
2097    DPRINTF(TLB, descriptor.dbgHeader().c_str());
2098    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2099            te.N, te.pfn, te.size, te.global, te.valid);
2100    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2101            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2102            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2103            te.nonCacheable, te.ns);
2104    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2105            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2106            descriptor.getRawData());
2107
2108    // Insert the entry into the TLB
2109    tlb->insert(currState->vaddr, te);
2110    if (!currState->timing) {
2111        currState->tc  = NULL;
2112        currState->req = NULL;
2113    }
2114}
2115
2116ArmISA::TableWalker *
2117ArmTableWalkerParams::create()
2118{
2119    return new ArmISA::TableWalker(this);
2120}
2121
2122LookupLevel
2123TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2124{
2125    switch (lookup_level_as_int) {
2126      case L1:
2127        return L1;
2128      case L2:
2129        return L2;
2130      case L3:
2131        return L3;
2132      default:
2133        panic("Invalid lookup level conversion");
2134    }
2135}
2136
2137/* This method keeps track of the table walker queue's residency, so it
2138 * needs to be called whenever requests start and complete. */
2139void
2140TableWalker::pendingChange()
2141{
2142    unsigned n = pendingQueue.size();
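    // currState may hold a walk that is being set up and is not (or is no
    // longer) the head of pendingQueue; count it as pending as well.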
2143    if ((currState != NULL) && (currState != pendingQueue.front())) {
2144        ++n;
2145    }
2146
2147    if (n != pendingReqs) {
2148        Tick now = curTick();
2149        statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2150        pendingReqs = n;
2151        pendingChangeTick = now;
2152    }
2153}
2154
2155Fault
2156TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2157                      LookupLevel lookup_level)
2158{
2159    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2160                         currState->mode, domain, lookup_level);
2161}
2162
2163
2164uint8_t
2165TableWalker::pageSizeNtoStatBin(uint8_t N)
2166{
2167    /* for statPageSizes */
2168    switch (N) {
2169        case 12: return 0; // 4K
2170        case 14: return 1; // 16K (using 16K granule in v8-64)
2171        case 16: return 2; // 64K
2172        case 20: return 3; // 1M
2173        case 21: return 4; // 2M-LPAE
2174        case 24: return 5; // 16M
2175        case 25: return 6; // 32M (using 16K granule in v8-64)
2176        case 29: return 7; // 512M (using 64K granule in v8-64)
2177        case 30: return 8; // 1G-LPAE
2178        default:
2179            panic("unknown page size");
2180            return 255;
2181    }
2182}
2183
2184void
2185TableWalker::regStats()
2186{
2187    ClockedObject::regStats();
2188
2189    statWalks
2190        .name(name() + ".walks")
2191        .desc("Table walker walks requested")
2192        ;
2193
2194    statWalksShortDescriptor
2195        .name(name() + ".walksShort")
2196        .desc("Table walker walks initiated with short descriptors")
2197        .flags(Stats::nozero)
2198        ;
2199
2200    statWalksLongDescriptor
2201        .name(name() + ".walksLong")
2202        .desc("Table walker walks initiated with long descriptors")
2203        .flags(Stats::nozero)
2204        ;
2205
2206    statWalksShortTerminatedAtLevel
2207        .init(2)
2208        .name(name() + ".walksShortTerminationLevel")
2209        .desc("Level at which table walker walks "
2210              "with short descriptors terminate")
2211        .flags(Stats::nozero)
2212        ;
2213    statWalksShortTerminatedAtLevel.subname(0, "Level1");
2214    statWalksShortTerminatedAtLevel.subname(1, "Level2");
2215
2216    statWalksLongTerminatedAtLevel
2217        .init(4)
2218        .name(name() + ".walksLongTerminationLevel")
2219        .desc("Level at which table walker walks "
2220              "with long descriptors terminate")
2221        .flags(Stats::nozero)
2222        ;
2223    statWalksLongTerminatedAtLevel.subname(0, "Level0");
2224    statWalksLongTerminatedAtLevel.subname(1, "Level1");
2225    statWalksLongTerminatedAtLevel.subname(2, "Level2");
2226    statWalksLongTerminatedAtLevel.subname(3, "Level3");
2227
2228    statSquashedBefore
2229        .name(name() + ".walksSquashedBefore")
2230        .desc("Table walks squashed before starting")
2231        .flags(Stats::nozero)
2232        ;
2233
2234    statSquashedAfter
2235        .name(name() + ".walksSquashedAfter")
2236        .desc("Table walks squashed after completion")
2237        .flags(Stats::nozero)
2238        ;
2239
2240    statWalkWaitTime
2241        .init(16)
2242        .name(name() + ".walkWaitTime")
2243        .desc("Table walker wait (enqueue to first request) latency")
2244        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2245        ;
2246
2247    statWalkServiceTime
2248        .init(16)
2249        .name(name() + ".walkCompletionTime")
2250        .desc("Table walker service (enqueue to completion) latency")
2251        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2252        ;
2253
2254    statPendingWalks
2255        .init(16)
2256        .name(name() + ".walksPending")
2257        .desc("Table walker pending requests distribution")
2258        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
2259        ;
2260
2261    statPageSizes // see DDI 0487A D4-1661
2262        .init(9)
2263        .name(name() + ".walkPageSizes")
2264        .desc("Table walker page sizes translated")
2265        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
2266        ;
2267    statPageSizes.subname(0, "4K");
2268    statPageSizes.subname(1, "16K");
2269    statPageSizes.subname(2, "64K");
2270    statPageSizes.subname(3, "1M");
2271    statPageSizes.subname(4, "2M");
2272    statPageSizes.subname(5, "16M");
2273    statPageSizes.subname(6, "32M");
2274    statPageSizes.subname(7, "512M");
2275    statPageSizes.subname(8, "1G");
2276
2277    statRequestOrigin
2278        .init(2,2) // requested/completed, data/inst
2279        .name(name() + ".walkRequestOrigin")
2280        .desc("Table walker requests started/completed, data/inst")
2281        .flags(Stats::total)
2282        ;
2283    statRequestOrigin.subname(0,"Requested");
2284    statRequestOrigin.subname(1,"Completed");
2285    statRequestOrigin.ysubname(0,"Data");
2286    statRequestOrigin.ysubname(1,"Inst");
2287}
2288