table_walker.cc revision 14093
/*
 * Copyright (c) 2010, 2012-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */
#include "arch/arm/table_walker.hh"

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "dev/dma_device.hh"
#include "sim/system.hh"

using namespace ArmISA;

TableWalker::TableWalker(const Params *p)
    : ClockedObject(p),
      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
      isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p->num_squash_per_cycle),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name())
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}

TableWalker::~TableWalker()
{
    ;
}

void
TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    port = &m->getDMAPort();
    masterId = master_id;
}

void
TableWalker::init()
{
    fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
    fatal_if(!port, "Table walker must have a valid port\n");
    fatal_if(!tlb, "Table walker must have a valid TLB\n");
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        if (!isStage2) {
            return *port;
        } else {
            fatal("Cannot access table walker port through stage-two walker\n");
        }
    }
    return ClockedObject::getPort(if_name, idx);
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
    asid(0), vmid(0), isHyp(false), transState(nullptr),
    vaddr(0), vaddr_tainted(0),
    sctlr(0), scr(0), cpsr(0), tcr(0),
    htcr(0), hcr(0), vtcr(0),
    isWrite(false), isFetch(false), isSecure(false),
    secureLookup(false), rwTable(false), userTable(false), xnTable(false),
    pxnTable(false), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[L0].empty() && stateQueues[L1].empty() &&
        stateQueues[L2].empty() && stateQueues[L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params()->sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType,
                  bool _stage2Req)
{
    assert(!(_functional && _timing));
    ++statWalks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should only be created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen, or we'll use
        // this fault to re-execute the faulting instruction, which should
        // clean up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++statSquashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
    // aarch32/translation/translation/AArch32.TranslateAddress dictates
    // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
    if (isStage2) {
        currState->el = EL1;
        currState->aarch64 = ELIs64(_tc, EL2);
    } else {
        currState->el =
            TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
        currState->aarch64 =
            ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
    }
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB; all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
        } else switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case EL2:
            assert(_haveVirtualization);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr  = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr   = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr  = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    statRequestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

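    // The long-descriptor (LPAE) format is used for AArch64, Hyp-mode and
    // stage-2 walks, and whenever the OS has enabled it for AArch32
    // (longDescFormatInUse); otherwise the short-descriptor format applies.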
    bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;

        ++statWalksLongDescriptor;
    } else {
        ++statWalksShortDescriptor;
    }

    if (!currState->timing) {
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
    } else {
        pending = true;
        pendingChange();
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}

void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            currState->el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, let's process it
        pending = true;
        pendingQueue.pop_front();
        // Keep currState in case one of the processWalk... calls NULLs it
        WalkerState *curr_state_copy = currState;
        Fault f;
        if (currState->aarch64)
            f = processWalkAArch64();
        else if (longDescFormatInUse(currState->tc) ||
                 currState->isHyp || isStage2)
            f = processWalkLPAE();
        else
            f = processWalk();

        if (f != NoFault) {
            curr_state_copy->transState->finish(f, curr_state_copy->req,
                    curr_state_copy->tc, curr_state_copy->mode);

            delete curr_state_copy;
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;
        statSquashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                      currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            statWalkServiceTime.sample(curTick() - currState->startTime);
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, currState->el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    statWalkWaitTime.sample(curTick() - currState->startTime);

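    // With the short-descriptor format, TTBCR.N splits the address space:
    // a VA whose top N bits are all zero is translated through TTBR0,
    // anything else through TTBR1 (N == 0 means TTBR0 covers everything).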
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        currState->ttbcr.n = 0;
    }

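    // The first-level descriptor address combines TTBR bits [31:14-N] with
    // VA bits [31-N:20]; each descriptor is 4 bytes, hence the << 2.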
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = testWalk(l1desc_addr, sizeof(uint32_t),
                 TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}

Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    statWalkWaitTime.sample(curTick() - currState->startTime);

    Request::Flags flag = Request::PT_WALK;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use; if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
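        // TTBR0 maps VAs in [0, ttbr0_max] and TTBR1 maps VAs in
        // [ttbr1_min, 4GB); a VA falling between the two regions (possible
        // when both T0SZ and T1SZ are non-zero) faults further below.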
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite, ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   LongDescEventByLevel[start_lookup_level],
                                   &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

unsigned
TableWalker::adjustTableSizeAArch64(unsigned tsz)
{
    if (tsz < 25)
        return 25;
    if (tsz > 48)
        return 48;
    return tsz;
}

bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
{
    return (currPhysAddrRange != MaxPhysAddrRange &&
            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
}

Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

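    // TCR_ELx.TG0 and TG1 encode the translation granule with different
    // encodings, hence the two separate decode tables.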
    static const GrainSize GrainMap_tg0[] =
      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_tg1[] =
      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    statWalkWaitTime.sample(curTick() - currState->startTime);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    switch (currState->el) {
      case EL0:
      case EL1:
        if (isStage2) {
            DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];
            // ARM DDI 0487A.f D7-2148
            // The starting level of stage 2 translation depends on
            // VTCR_EL2.SL0 and VTCR_EL2.TG0
            LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
            uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
            static const LookupLevel SLL[] = {
                L2, L3, L3, __, // sl0 == 0
                L1, L2, L2, __, // sl0 == 1, etc.
                L0, L1, L1, __,
                __, __, __, __
            };
            start_lookup_level = SLL[sl_tg];
            panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                     "Cannot discern lookup level from vtcr.{sl0,tg0}");
            ps = currState->vtcr.ps;
        } else {
            switch (bits(currState->vaddr, 63,48)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                    currState->tcr.epd0)
                  fault = true;
                break;
              case 0xffff:
                DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
                tg = GrainMap_tg1[currState->tcr.tg1];
                if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                    currState->tcr.epd1)
                  fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case EL2:
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            break;

          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_tg1[currState->tcr.tg1];
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->tcr.epd1 || !currState->hcr.e2h)
              fault = true;
            break;

           default:
              // invalid addr if top two bytes are not all 0s or all 1s
              fault = true;
        }
        ps = currState->tcr.ps;
        break;
      case EL3:
        switch(bits(currState->vaddr, 63,48)) {
            case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                break;
            default:
                // invalid addr if top two bytes are not all 0s
                fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    if (fault) {
        Fault f;
        if (currState->isFetch)
            f =  std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L0, isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::TranslationLL + L0,
                isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A.  These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
    if (start_lookup_level == MAX_LOOKUP_LEVELS) {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0, 0 } },
            { Grain16KB, { 47, 36, 25, 0 } },
            { Grain64KB, { 64, 42, 29, 0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }

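    // tg holds log2(granule size in bytes); each table level resolves
    // (tg - 3) VA bits (9 for 4KB, 11 for 16KB, 13 for 64KB granules),
    // since a table holds 2^(tg - 3) eight-byte descriptors.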
    int stride = tg - 3;

    // Determine table base address
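    // The start-level table holds 2^(tsz - stride*(3 - start_lookup_level)
    // - tg) eight-byte descriptors, so its base is aligned to that size;
    // base_addr_lo is log2 of that alignment.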
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

   }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
                        sizeof(uint64_t), flag, start_lookup_level,
                        LongDescEventByLevel[start_lookup_level], NULL);
    } else {
        fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                        sizeof(uint64_t), flag, -1, NULL,
                        &TableWalker::doLongDescriptor);
        f = currState->fault;
    }

    return f;
}

void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctlr class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
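        // AttrIndx[2] selects between MAIR1 and MAIR0; AttrIndx[1:0] selects
        // one of the four 8-bit attribute fields within that register.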
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR
        // register so as to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
                             LongDescriptor &lDescriptor)
{
    uint8_t attr;
    uint8_t attr_hi;
    uint8_t attr_lo;
    uint8_t sh = lDescriptor.sh();

    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_hi = (attr >> 2) & 0x3;
        uint8_t attr_lo =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_hi == 0) {
            te.mtype        = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_lo == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_hi == 1 ? 0 :
                              attr_hi == 2 ? 2 : 1;
            te.innerAttrs   = attr_lo == 1 ? 0 :
                              attr_lo == 2 ? 6 : 5;
            // Treat write-through memory as uncacheable; this is safe
            // but for performance reasons not optimal.
            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                (attr_lo == 1) || (attr_lo == 2);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);

        // Select MAIR
        uint64_t mair;
        switch (currState->el) {
          case EL0:
          case EL1:
            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
            break;
          case EL2:
            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
            break;
          case EL3:
            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }

        // Select attributes
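        // MAIR_ELx packs eight 8-bit Attr fields; AttrIndx selects the byte.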
        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        attr_lo = bits(attr, 3, 0);
        attr_hi = bits(attr, 7, 4);

        // Memory type
        te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;

        // Cacheability
        te.nonCacheable = false;
        if (te.mtype == TlbEntry::MemoryType::Device) {  // Device memory
            te.nonCacheable = true;
        }
        // Treat write-through memory as uncacheable; this is safe
        // but for performance reasons not optimal.
        switch (attr_hi) {
          case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
          case 0x4:         // Normal memory, Outer Non-cacheable
          case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
            te.nonCacheable = true;
        }
        switch (attr_lo) {
          case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
          case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
            warn_if(!attr_hi, "Unpredictable behavior");
            M5_FALLTHROUGH;
          case 0x4:         // Device-nGnRE memory or
                            // Normal memory, Inner Non-cacheable
          case 0x8:         // Device-nGRE memory or
                            // Normal memory, Inner Write-through non-transient
            te.nonCacheable = true;
        }

        te.shareable       = sh == 2;
        te.outerShareable = (sh & 0x2) ? true : false;
        // Attributes formatted according to the 64-bit PAR
        te.attributes = ((uint64_t) attr << 56) |
            (1 << 11) |     // LPAE bit
            (te.ns << 9) |  // NS bit
            (sh << 7);
    }
}
1440
1441void
1442TableWalker::doL1Descriptor()
1443{
1444    if (currState->fault != NoFault) {
1445        return;
1446    }
1447
1448    currState->l1Desc.data = htog(currState->l1Desc.data,
1449                                  byteOrder(currState->tc));
1450
1451    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1452            currState->vaddr_tainted, currState->l1Desc.data);
1453    TlbEntry te;
1454
1455    switch (currState->l1Desc.type()) {
1456      case L1Descriptor::Ignore:
1457      case L1Descriptor::Reserved:
1458        if (!currState->timing) {
1459            currState->tc = NULL;
1460            currState->req = NULL;
1461        }
1462        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1463        if (currState->isFetch)
1464            currState->fault =
1465                std::make_shared<PrefetchAbort>(
1466                    currState->vaddr_tainted,
1467                    ArmFault::TranslationLL + L1,
1468                    isStage2,
1469                    ArmFault::VmsaTran);
1470        else
1471            currState->fault =
1472                std::make_shared<DataAbort>(
1473                    currState->vaddr_tainted,
1474                    TlbEntry::DomainType::NoAccess,
1475                    currState->isWrite,
1476                    ArmFault::TranslationLL + L1, isStage2,
1477                    ArmFault::VmsaTran);
1478        return;
1479      case L1Descriptor::Section:
1480        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1481            /** @todo: check sctlr.ha (bit[17]); if the Hardware Access
1482              * Flag is enabled, do l1Desc.setAp0() instead of generating
1483              * an access flag fault.
1484              */
1485
1486            currState->fault = std::make_shared<DataAbort>(
1487                currState->vaddr_tainted,
1488                currState->l1Desc.domain(),
1489                currState->isWrite,
1490                ArmFault::AccessFlagLL + L1,
1491                isStage2,
1492                ArmFault::VmsaTran);
1493        }
1494        if (currState->l1Desc.supersection()) {
1495            panic("Haven't implemented supersections\n");
1496        }
1497        insertTableEntry(currState->l1Desc, false);
1498        return;
1499      case L1Descriptor::PageTable:
1500        {
1501            Addr l2desc_addr;
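                // The L2 table is indexed by VA[19:12] and holds 4-byte
                // entries, hence the shift by 2 when forming the address.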
1502            l2desc_addr = currState->l1Desc.l2Addr() |
1503                (bits(currState->vaddr, 19, 12) << 2);
1504            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1505                    l2desc_addr, currState->isSecure ? "s" : "ns");
1506
1507            // Trickbox address check
1508            currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1509                                        currState->l1Desc.domain(), L2);
1510
1511            if (currState->fault) {
1512                if (!currState->timing) {
1513                    currState->tc = NULL;
1514                    currState->req = NULL;
1515                }
1516                return;
1517            }
1518
1519            Request::Flags flag = Request::PT_WALK;
1520            if (currState->isSecure)
1521                flag.set(Request::SECURE);
1522
1523            bool delayed;
1524            delayed = fetchDescriptor(l2desc_addr,
1525                                      (uint8_t*)&currState->l2Desc.data,
1526                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
1527                                      &TableWalker::doL2Descriptor);
1528            if (delayed) {
1529                currState->delayed = true;
1530            }
1531
1532            return;
1533        }
1534      default:
1535        panic("A new type in a 2 bit field?\n");
1536    }
1537}
1538
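    // Build the fault for an aborted long-descriptor walk: a prefetch abort
    // for instruction fetches, a data abort otherwise, with the fault source
    // offset by the lookup level at which the walk stopped.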
1539Fault
1540TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1541{
1542    if (currState->isFetch) {
1543        return std::make_shared<PrefetchAbort>(
1544            currState->vaddr_tainted,
1545            src + currState->longDesc.lookupLevel,
1546            isStage2,
1547            ArmFault::LpaeTran);
1548    } else {
1549        return std::make_shared<DataAbort>(
1550            currState->vaddr_tainted,
1551            TlbEntry::DomainType::NoAccess,
1552            currState->isWrite,
1553            src + currState->longDesc.lookupLevel,
1554            isStage2,
1555            ArmFault::LpaeTran);
1556    }
1557}
1558
1559void
1560TableWalker::doLongDescriptor()
1561{
1562    if (currState->fault != NoFault) {
1563        return;
1564    }
1565
1566    currState->longDesc.data = htog(currState->longDesc.data,
1567                                    byteOrder(currState->tc));
1568
1569    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1570            currState->longDesc.lookupLevel, currState->vaddr_tainted,
1571            currState->longDesc.data,
1572            currState->aarch64 ? "AArch64" : "long-desc.");
1573
1574    if ((currState->longDesc.type() == LongDescriptor::Block) ||
1575        (currState->longDesc.type() == LongDescriptor::Page)) {
1576        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1577                "xn: %d, ap: %d, af: %d, type: %d\n",
1578                currState->longDesc.lookupLevel,
1579                currState->longDesc.data,
1580                currState->longDesc.pxn(),
1581                currState->longDesc.xn(),
1582                currState->longDesc.ap(),
1583                currState->longDesc.af(),
1584                currState->longDesc.type());
1585    } else {
1586        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1587                currState->longDesc.lookupLevel,
1588                currState->longDesc.data,
1589                currState->longDesc.type());
1590    }
1591
1592    TlbEntry te;
1593
1594    switch (currState->longDesc.type()) {
1595      case LongDescriptor::Invalid:
1596        if (!currState->timing) {
1597            currState->tc = NULL;
1598            currState->req = NULL;
1599        }
1600
1601        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1602                currState->longDesc.lookupLevel,
1603                ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1604
1605        currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1606        return;
1607
1608      case LongDescriptor::Block:
1609      case LongDescriptor::Page:
1610        {
1611            auto fault_source = ArmFault::FaultSourceInvalid;
1612            // Check for address size fault
1613            if (checkAddrSizeFaultAArch64(
1614                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1615                          currState->longDesc.offsetBits()),
1616                    currState->physAddrRange)) {
1617
1618                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1619                        currState->longDesc.lookupLevel);
1620                fault_source = ArmFault::AddressSizeLL;
1621
1622            // Check for access fault
1623            } else if (currState->longDesc.af() == 0) {
1624
1625                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1626                        currState->longDesc.lookupLevel);
1627                fault_source = ArmFault::AccessFlagLL;
1628            }
1629
1630            if (fault_source != ArmFault::FaultSourceInvalid) {
1631                currState->fault = generateLongDescFault(fault_source);
1632            } else {
1633                insertTableEntry(currState->longDesc, true);
1634            }
1635        }
1636        return;
1637      case LongDescriptor::Table:
1638        {
1639            // Set hierarchical permission flags
1640            currState->secureLookup = currState->secureLookup &&
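                // Permissions only become more restrictive as the walk
                // descends: secure/rw/user lookups are AND-ed across levels
                // while the XN/PXN table bits are OR-ed in.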
1641                currState->longDesc.secureTable();
1642            currState->rwTable = currState->rwTable &&
1643                currState->longDesc.rwTable();
1644            currState->userTable = currState->userTable &&
1645                currState->longDesc.userTable();
1646            currState->xnTable = currState->xnTable ||
1647                currState->longDesc.xnTable();
1648            currState->pxnTable = currState->pxnTable ||
1649                currState->longDesc.pxnTable();
1650
1651            // Set up next level lookup
1652            Addr next_desc_addr = currState->longDesc.nextDescAddr(
1653                currState->vaddr);
1654
1655            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1656                    currState->longDesc.lookupLevel,
1657                    currState->longDesc.lookupLevel + 1,
1658                    next_desc_addr,
1659                    currState->secureLookup ? "s" : "ns");
1660
1661            // Check for address size fault
1662            if (currState->aarch64 && checkAddrSizeFaultAArch64(
1663                    next_desc_addr, currState->physAddrRange)) {
1664                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1665                        currState->longDesc.lookupLevel);
1666
1667                currState->fault = generateLongDescFault(
1668                    ArmFault::AddressSizeLL);
1669                return;
1670            }
1671
1672            // Trickbox address check
1673            currState->fault = testWalk(
1674                next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1675                toLookupLevel(currState->longDesc.lookupLevel + 1));
1676
1677            if (currState->fault) {
1678                if (!currState->timing) {
1679                    currState->tc = NULL;
1680                    currState->req = NULL;
1681                }
1682                return;
1683            }
1684
1685            Request::Flags flag = Request::PT_WALK;
1686            if (currState->secureLookup)
1687                flag.set(Request::SECURE);
1688
1689            LookupLevel L = currState->longDesc.lookupLevel =
1690                (LookupLevel) (currState->longDesc.lookupLevel + 1);
1691            Event *event = NULL;
1692            switch (L) {
1693              case L1:
1694                assert(currState->aarch64);
                    M5_FALLTHROUGH;
1695              case L2:
1696              case L3:
1697                event = LongDescEventByLevel[L];
1698                break;
1699              default:
1700                panic("Wrong lookup level in table walk\n");
1701                break;
1702            }
1703
1704            bool delayed;
1705            delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1706                                      sizeof(uint64_t), flag, -1, event,
1707                                      &TableWalker::doLongDescriptor);
1708            if (delayed) {
1709                currState->delayed = true;
1710            }
1711        }
1712        return;
1713      default:
1714        panic("A new type in a 2 bit field?\n");
1715    }
1716}
1717
1718void
1719TableWalker::doL2Descriptor()
1720{
1721    if (currState->fault != NoFault) {
1722        return;
1723    }
1724
1725    currState->l2Desc.data = htog(currState->l2Desc.data,
1726                                  byteOrder(currState->tc));
1727
1728    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1729            currState->vaddr_tainted, currState->l2Desc.data);
1730    TlbEntry te;
1731
1732    if (currState->l2Desc.invalid()) {
1733        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1734        if (!currState->timing) {
1735            currState->tc = NULL;
1736            currState->req = NULL;
1737        }
1738        if (currState->isFetch)
1739            currState->fault = std::make_shared<PrefetchAbort>(
1740                    currState->vaddr_tainted,
1741                    ArmFault::TranslationLL + L2,
1742                    isStage2,
1743                    ArmFault::VmsaTran);
1744        else
1745            currState->fault = std::make_shared<DataAbort>(
1746                currState->vaddr_tainted, currState->l1Desc.domain(),
1747                currState->isWrite, ArmFault::TranslationLL + L2,
1748                isStage2,
1749                ArmFault::VmsaTran);
1750        return;
1751    }
1752
1753    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1754        /** @todo: check sctlr.ha (bit[17]); if the Hardware Access Flag
1755          * is enabled, do l2Desc.setAp0() instead of generating an
1756          * access flag fault. */
1757        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1758                currState->sctlr.afe, currState->l2Desc.ap());
1759
1760        currState->fault = std::make_shared<DataAbort>(
1761            currState->vaddr_tainted,
1762            TlbEntry::DomainType::NoAccess, currState->isWrite,
1763            ArmFault::AccessFlagLL + L2, isStage2,
1764            ArmFault::VmsaTran);
1765    }
1766
1767    insertTableEntry(currState->l2Desc, false);
1768}
1769
1770void
1771TableWalker::doL1DescriptorWrapper()
1772{
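        // The walk whose L1 descriptor fetch just completed sits at the
        // head of the L1 state queue.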
1773    currState = stateQueues[L1].front();
1774    currState->delayed = false;
1775    // if there's a stage2 translation object we don't need it any more
1776    if (currState->stage2Tran) {
1777        delete currState->stage2Tran;
1778        currState->stage2Tran = NULL;
1779    }
1780
1782    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n", &currState->l1Desc.data);
1783    DPRINTF(TLBVerbose, "L1 Desc object      data: %08x\n", currState->l1Desc.data);
1784
1785    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1786    doL1Descriptor();
1787
1788    stateQueues[L1].pop_front();
1789    // Check if fault was generated
1790    if (currState->fault != NoFault) {
1791        currState->transState->finish(currState->fault, currState->req,
1792                                      currState->tc, currState->mode);
1793        statWalksShortTerminatedAtLevel[0]++;
1794
1795        pending = false;
1796        nextWalk(currState->tc);
1797
1798        currState->req = NULL;
1799        currState->tc = NULL;
1800        currState->delayed = false;
1801        delete currState;
1802    }
1803    else if (!currState->delayed) {
1804        // delay is not set so there is no L2 to do
1805        // Don't finish the translation if a stage 2 look up is underway
1806        statWalkServiceTime.sample(curTick() - currState->startTime);
1807        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1808        tlb->translateTiming(currState->req, currState->tc,
1809                             currState->transState, currState->mode);
1810        statWalksShortTerminatedAtLevel[0]++;
1811
1812        pending = false;
1813        nextWalk(currState->tc);
1814
1815        currState->req = NULL;
1816        currState->tc = NULL;
1817        currState->delayed = false;
1818        delete currState;
1819    } else {
1820        // need to do L2 descriptor
1821        stateQueues[L2].push_back(currState);
1822    }
1823    currState = NULL;
1824}
1825
1826void
1827TableWalker::doL2DescriptorWrapper()
1828{
1829    currState = stateQueues[L2].front();
1830    assert(currState->delayed);
1831    // if there's a stage2 translation object we don't need it any more
1832    if (currState->stage2Tran) {
1833        delete currState->stage2Tran;
1834        currState->stage2Tran = NULL;
1835    }
1836
1837    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1838            currState->vaddr_tainted);
1839    doL2Descriptor();
1840
1841    // Check if fault was generated
1842    if (currState->fault != NoFault) {
1843        currState->transState->finish(currState->fault, currState->req,
1844                                      currState->tc, currState->mode);
1845        statWalksShortTerminatedAtLevel[1]++;
1846    } else {
1847        statWalkServiceTime.sample(curTick() - currState->startTime);
1848        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1849        tlb->translateTiming(currState->req, currState->tc,
1850                             currState->transState, currState->mode);
1851        statWalksShortTerminatedAtLevel[1]++;
1852    }
1853
1855    stateQueues[L2].pop_front();
1856    pending = false;
1857    nextWalk(currState->tc);
1858
1859    currState->req = NULL;
1860    currState->tc = NULL;
1861    currState->delayed = false;
1862
1863    delete currState;
1864    currState = NULL;
1865}
1866
1867void
1868TableWalker::doL0LongDescriptorWrapper()
1869{
1870    doLongDescriptorWrapper(L0);
1871}
1872
1873void
1874TableWalker::doL1LongDescriptorWrapper()
1875{
1876    doLongDescriptorWrapper(L1);
1877}
1878
1879void
1880TableWalker::doL2LongDescriptorWrapper()
1881{
1882    doLongDescriptorWrapper(L2);
1883}
1884
1885void
1886TableWalker::doL3LongDescriptorWrapper()
1887{
1888    doLongDescriptorWrapper(L3);
1889}
1890
1891void
1892TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1893{
1894    currState = stateQueues[curr_lookup_level].front();
1895    assert(curr_lookup_level == currState->longDesc.lookupLevel);
1896    currState->delayed = false;
1897
1898    // if there's a stage2 translation object we don't need it any more
1899    if (currState->stage2Tran) {
1900        delete currState->stage2Tran;
1901        currState->stage2Tran = NULL;
1902    }
1903
1904    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1905            currState->vaddr_tainted);
1906    doLongDescriptor();
1907
1908    stateQueues[curr_lookup_level].pop_front();
1909
1910    if (currState->fault != NoFault) {
1911        // A fault was generated
1912        currState->transState->finish(currState->fault, currState->req,
1913                                      currState->tc, currState->mode);
1914
1915        pending = false;
1916        nextWalk(currState->tc);
1917
1918        currState->req = NULL;
1919        currState->tc = NULL;
1920        currState->delayed = false;
1921        delete currState;
1922    } else if (!currState->delayed) {
1923        // No additional lookups required
1924        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1925        statWalkServiceTime.sample(curTick() - currState->startTime);
1926        tlb->translateTiming(currState->req, currState->tc,
1927                             currState->transState, currState->mode);
1928        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1929
1930        pending = false;
1931        nextWalk(currState->tc);
1932
1933        currState->req = NULL;
1934        currState->tc = NULL;
1935        currState->delayed = false;
1936        delete currState;
1937    } else {
1938        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1939            panic("Max. number of lookups already reached in table walk\n");
1940        // Need to perform additional lookups
1941        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1942    }
1943    currState = NULL;
1944}
1945
1947void
1948TableWalker::nextWalk(ThreadContext *tc)
1949{
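        // Start the next queued walk on the following clock edge, or
        // complete a pending drain if there is nothing left to do.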
1950    if (pendingQueue.size())
1951        schedule(doProcessEvent, clockEdge(Cycles(1)));
1952    else
1953        completeDrain();
1954}
1955
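    // Issue a read of a page table descriptor. In timing mode the walk state
    // is parked on stateQueues[queueIndex] (when queueIndex >= 0) and the
    // supplied event fires once the data arrives; otherwise the descriptor
    // handler is called before returning. Returns true if the access is
    // timing, i.e. completion is delayed.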
1956bool
1957TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1958    Request::Flags flags, int queueIndex, Event *event,
1959    void (TableWalker::*doDescriptor)())
1960{
1961    bool isTiming = currState->timing;
1962
1963    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
1964            descAddr, currState->stage2Req);
1965
1966    // If this translation has a stage 2 then we know descAddr is an IPA and
1967    // needs to be translated before we can access the page table. Do that
1968    // check here.
1969    if (currState->stage2Req) {
1970        Fault fault;
1971        flags = flags | TLB::MustBeOne;
1972
1973        if (isTiming) {
1974            Stage2MMU::Stage2Translation *tran = new
1975                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1976                                             currState->vaddr);
1977            currState->stage2Tran = tran;
1978            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1979                                     flags);
1980            fault = tran->fault;
1981        } else {
1982            fault = stage2Mmu->readDataUntimed(currState->tc,
1983                currState->vaddr, descAddr, data, numBytes, flags,
1984                currState->functional);
1985        }
1986
1987        if (fault != NoFault) {
1988            currState->fault = fault;
1989        }
1990        if (isTiming) {
1991            if (queueIndex >= 0) {
1992                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1993                        stateQueues[queueIndex].size());
1994                stateQueues[queueIndex].push_back(currState);
1995                currState = NULL;
1996            }
1997        } else {
1998            (this->*doDescriptor)();
1999        }
2000    } else {
2001        if (isTiming) {
2002            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2003                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2004            if (queueIndex >= 0) {
2005                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2006                        stateQueues[queueIndex].size());
2007                stateQueues[queueIndex].push_back(currState);
2008                currState = NULL;
2009            }
2010        } else if (!currState->functional) {
2011            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2012                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2013            (this->*doDescriptor)();
2014        } else {
2015            RequestPtr req = std::make_shared<Request>(
2016                descAddr, numBytes, flags, masterId);
2017
2018            req->taskId(ContextSwitchTaskId::DMA);
2019            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
2020            pkt->dataStatic(data);
2021            port->sendFunctional(pkt);
2022            (this->*doDescriptor)();
2023            delete pkt;
2024        }
2025    }
2026    return (isTiming);
2027}
2028
2029void
2030TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2031{
2032    TlbEntry te;
2033
2034    // Create and fill a new page table entry
2035    te.valid          = true;
2036    te.longDescFormat = longDescriptor;
2037    te.isHyp          = currState->isHyp;
2038    te.asid           = currState->asid;
2039    te.vmid           = currState->vmid;
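        // offsetBits() is log2 of the mapped region size, so it provides
        // both the VPN shift and the size mask below.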
2040    te.N              = descriptor.offsetBits();
2041    te.vpn            = currState->vaddr >> te.N;
2042    te.size           = (1<<te.N) - 1;
2043    te.pfn            = descriptor.pfn();
2044    te.domain         = descriptor.domain();
2045    te.lookupLevel    = descriptor.lookupLevel;
2046    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
2047    te.nstid          = !currState->isSecure;
2048    te.xn             = descriptor.xn();
2049    if (currState->aarch64)
2050        te.el         = currState->el;
2051    else
2052        te.el         = EL1;
2053
2054    statPageSizes[pageSizeNtoStatBin(te.N)]++;
2055    statRequestOrigin[COMPLETED][currState->isFetch]++;
2056
2057    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2058    // as global
2059    te.global         = descriptor.global(currState) || isStage2;
2060    if (longDescriptor) {
2061        LongDescriptor lDescriptor =
2062            dynamic_cast<LongDescriptor &>(descriptor);
2063
2064        te.xn |= currState->xnTable;
2065        te.pxn = currState->pxnTable || lDescriptor.pxn();
2066        if (isStage2) {
2067            // This is actually the HAP field, but it's stored in the same
2068            // bit positions as the AP field in a stage 1 translation.
2069            te.hap = lDescriptor.ap();
2070        } else {
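                // Combine the leaf AP bits with the hierarchical APTable
                // restrictions: the read-only bit is forced if any table
                // level removed write access, and the EL0-accessible bit
                // also requires every table level to permit user access.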
2071            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2072                (currState->userTable && (descriptor.ap() & 0x1));
2073        }
2074        if (currState->aarch64)
2075            memAttrsAArch64(currState->tc, te, lDescriptor);
2076        else
2077            memAttrsLPAE(currState->tc, te, lDescriptor);
2078    } else {
2079        te.ap = descriptor.ap();
2080        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2081                 descriptor.shareable());
2082    }
2083
2084    // Debug output
2085    DPRINTF(TLB, descriptor.dbgHeader().c_str());
2086    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2087            te.N, te.pfn, te.size, te.global, te.valid);
2088    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2089            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2090            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2091            te.nonCacheable, te.ns);
2092    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2093            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2094            descriptor.getRawData());
2095
2096    // Insert the entry into the TLB
2097    tlb->insert(currState->vaddr, te);
2098    if (!currState->timing) {
2099        currState->tc  = NULL;
2100        currState->req = NULL;
2101    }
2102}
2103
2104ArmISA::TableWalker *
2105ArmTableWalkerParams::create()
2106{
2107    return new ArmISA::TableWalker(this);
2108}
2109
2110LookupLevel
2111TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2112{
2113    switch (lookup_level_as_int) {
2114      case L1:
2115        return L1;
2116      case L2:
2117        return L2;
2118      case L3:
2119        return L3;
2120      default:
2121        panic("Invalid lookup level conversion");
2122    }
2123}
2124
2125/* This method keeps track of the table walker queue's residency, so it
2126 * needs to be called whenever requests start and complete. */
2127void
2128TableWalker::pendingChange()
2129{
2130    unsigned n = pendingQueue.size();
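        // A walk that is currently being serviced may not be (or may no
        // longer be) at the head of pendingQueue, so count it separately.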
2131    if ((currState != NULL) && (currState != pendingQueue.front())) {
2132        ++n;
2133    }
2134
2135    if (n != pendingReqs) {
2136        Tick now = curTick();
2137        statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2138        pendingReqs = n;
2139        pendingChangeTick = now;
2140    }
2141}
2142
2143Fault
2144TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2145                      LookupLevel lookup_level)
2146{
2147    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2148                         currState->mode, domain, lookup_level);
2149}
2150
2151
2152uint8_t
2153TableWalker::pageSizeNtoStatBin(uint8_t N)
2154{
2155    /* for statPageSizes */
2156    switch (N) {
2157        case 12: return 0; // 4K
2158        case 14: return 1; // 16K (using 16K granule in v8-64)
2159        case 16: return 2; // 64K
2160        case 20: return 3; // 1M
2161        case 21: return 4; // 2M-LPAE
2162        case 24: return 5; // 16M
2163        case 25: return 6; // 32M (using 16K granule in v8-64)
2164        case 29: return 7; // 512M (using 64K granule in v8-64)
2165        case 30: return 8; // 1G-LPAE
2166        default:
2167            panic("unknown page size");
2168            return 255;
2169    }
2170}
2171
2172void
2173TableWalker::regStats()
2174{
2175    ClockedObject::regStats();
2176
2177    statWalks
2178        .name(name() + ".walks")
2179        .desc("Table walker walks requested")
2180        ;
2181
2182    statWalksShortDescriptor
2183        .name(name() + ".walksShort")
2184        .desc("Table walker walks initiated with short descriptors")
2185        .flags(Stats::nozero)
2186        ;
2187
2188    statWalksLongDescriptor
2189        .name(name() + ".walksLong")
2190        .desc("Table walker walks initiated with long descriptors")
2191        .flags(Stats::nozero)
2192        ;
2193
2194    statWalksShortTerminatedAtLevel
2195        .init(2)
2196        .name(name() + ".walksShortTerminationLevel")
2197        .desc("Level at which table walker walks "
2198              "with short descriptors terminate")
2199        .flags(Stats::nozero)
2200        ;
2201    statWalksShortTerminatedAtLevel.subname(0, "Level1");
2202    statWalksShortTerminatedAtLevel.subname(1, "Level2");
2203
2204    statWalksLongTerminatedAtLevel
2205        .init(4)
2206        .name(name() + ".walksLongTerminationLevel")
2207        .desc("Level at which table walker walks "
2208              "with long descriptors terminate")
2209        .flags(Stats::nozero)
2210        ;
2211    statWalksLongTerminatedAtLevel.subname(0, "Level0");
2212    statWalksLongTerminatedAtLevel.subname(1, "Level1");
2213    statWalksLongTerminatedAtLevel.subname(2, "Level2");
2214    statWalksLongTerminatedAtLevel.subname(3, "Level3");
2215
2216    statSquashedBefore
2217        .name(name() + ".walksSquashedBefore")
2218        .desc("Table walks squashed before starting")
2219        .flags(Stats::nozero)
2220        ;
2221
2222    statSquashedAfter
2223        .name(name() + ".walksSquashedAfter")
2224        .desc("Table walks squashed after completion")
2225        .flags(Stats::nozero)
2226        ;
2227
2228    statWalkWaitTime
2229        .init(16)
2230        .name(name() + ".walkWaitTime")
2231        .desc("Table walker wait (enqueue to first request) latency")
2232        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2233        ;
2234
2235    statWalkServiceTime
2236        .init(16)
2237        .name(name() + ".walkCompletionTime")
2238        .desc("Table walker service (enqueue to completion) latency")
2239        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2240        ;
2241
2242    statPendingWalks
2243        .init(16)
2244        .name(name() + ".walksPending")
2245        .desc("Table walker pending requests distribution")
2246        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
2247        ;
2248
2249    statPageSizes // see DDI 0487A D4-1661
2250        .init(9)
2251        .name(name() + ".walkPageSizes")
2252        .desc("Table walker page sizes translated")
2253        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
2254        ;
2255    statPageSizes.subname(0, "4K");
2256    statPageSizes.subname(1, "16K");
2257    statPageSizes.subname(2, "64K");
2258    statPageSizes.subname(3, "1M");
2259    statPageSizes.subname(4, "2M");
2260    statPageSizes.subname(5, "16M");
2261    statPageSizes.subname(6, "32M");
2262    statPageSizes.subname(7, "512M");
2263    statPageSizes.subname(8, "1G");
2264
2265    statRequestOrigin
2266        .init(2,2) // Instruction/Data, requests/completed
2267        .name(name() + ".walkRequestOrigin")
2268        .desc("Table walker requests started/completed, data/inst")
2269        .flags(Stats::total)
2270        ;
2271    statRequestOrigin.subname(0,"Requested");
2272    statRequestOrigin.subname(1,"Completed");
2273    statRequestOrigin.ysubname(0,"Data");
2274    statRequestOrigin.ysubname(1,"Inst");
2275}
2276