table_walker.cc revision 14095:4f5d16d7cf45
/*
 * Copyright (c) 2010, 2012-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */
#include "arch/arm/table_walker.hh"

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "dev/dma_device.hh"
#include "sim/system.hh"

using namespace ArmISA;

TableWalker::TableWalker(const Params *p)
    : ClockedObject(p),
      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
      isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p->num_squash_per_cycle),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name())
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}

TableWalker::~TableWalker()
{
    ;
}

void
TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    port = &m->getDMAPort();
    masterId = master_id;
}

void
TableWalker::init()
{
    fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
    fatal_if(!port, "Table walker must have a valid port\n");
    fatal_if(!tlb, "Table walker must have a valid TLB\n");
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        if (!isStage2) {
            return *port;
        } else {
            fatal("Cannot access table walker port through stage-two walker\n");
        }
    }
    return ClockedObject::getPort(if_name, idx);
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
    asid(0), vmid(0), isHyp(false), transState(nullptr),
    vaddr(0), vaddr_tainted(0),
    sctlr(0), scr(0), cpsr(0), tcr(0),
    htcr(0), hcr(0), vtcr(0),
    isWrite(false), isFetch(false), isSecure(false),
    secureLookup(false), rwTable(false), userTable(false), xnTable(false),
    pxnTable(false), hpd(false), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[L0].empty() && stateQueues[L1].empty() &&
        stateQueues[L2].empty() && stateQueues[L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params()->sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType,
                  bool _stage2Req)
{
    assert(!(_functional && _timing));
    ++statWalks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should only be created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen, or we'll use
        // this fault to re-execute the faulting instruction, which should
        // clean up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++statSquashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
    // aarch32/translation/translation/AArch32.TranslateAddress dictates
    // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
    if (isStage2) {
        currState->el = EL1;
        currState->aarch64 = ELIs64(_tc, EL2);
    } else {
        currState->el =
            TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
        currState->aarch64 =
            ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
    }
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB, all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
        } else switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case EL2:
            assert(_haveVirtualization);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr  = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr   = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr  = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    statRequestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

    bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;

        ++statWalksLongDescriptor;
    } else {
        ++statWalksShortDescriptor;
    }

    if (!currState->timing) {
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
    } else {
        pending = true;
        pendingChange();
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}

void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            currState->el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, let's process it
        pending = true;
        pendingQueue.pop_front();
        // Keep currState in case one of the processWalk... calls NULLs it
        WalkerState *curr_state_copy = currState;
        Fault f;
        if (currState->aarch64)
            f = processWalkAArch64();
        else if (longDescFormatInUse(currState->tc) ||
                 currState->isHyp || isStage2)
            f = processWalkLPAE();
        else
            f = processWalk();

        if (f != NoFault) {
            curr_state_copy->transState->finish(f, curr_state_copy->req,
                    curr_state_copy->tc, curr_state_copy->mode);

            delete curr_state_copy;
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed, we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;
        statSquashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                      currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            statWalkServiceTime.sample(curTick() - currState->startTime);
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, currState->el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    statWalkWaitTime.sample(curTick() - currState->startTime);

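    // TTBCR.N selects how many upper VA bits steer the walk to TTBR1: with
    // TTBCR.N == 2, for example, only VAs below 0x40000000 have bits [31:30]
    // clear and use TTBR0; anything else falls through to TTBR1 below.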
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        currState->ttbcr.n = 0;
    }

    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
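    // With N == 0 (as also forced in the TTBR1 case above) the base comes
    // from TTBR[31:14] and the index is VA[31:20] << 2, i.e. a 16 KB table
    // of 4096 word-sized first-level descriptors; a non-zero N shrinks the
    // TTBR0 table accordingly.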
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = testWalk(l1desc_addr, sizeof(uint32_t),
                 TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}

Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    statWalkWaitTime.sample(curTick() - currState->startTime);

    Request::Flags flag = Request::PT_WALK;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
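        // Example: with T0SZ == 1 and T1SZ == 1, TTBR0 covers VAs up to
        // 0x7fffffff and TTBR1 covers VAs from 0x80000000 upwards; with both
        // fields zero, TTBR0 spans the entire 32-bit space.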

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite, ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }
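    // Example: starting at L1 with tsz == 0 gives n == 5, so the descriptor
    // address is ttbr[39:5] | (VA[31:30] << 3), i.e. one of four 8-byte
    // entries in a 32-byte-aligned first-level table.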

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   LongDescEventByLevel[start_lookup_level],
                                   &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

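// Clamp the computed input-region size (64 - TxSZ) to the 25..48-bit range
// supported by the walker below.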
unsigned
TableWalker::adjustTableSizeAArch64(unsigned tsz)
{
    if (tsz < 25)
        return 25;
    if (tsz > 48)
        return 48;
    return tsz;
}

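// Flags an address size fault when any bit at or above the configured PA
// range (and below MaxPhysAddrRange) is set, e.g. bit 40 or higher when a
// 40-bit physical address range is in use.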
bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
{
    return (currPhysAddrRange != MaxPhysAddrRange &&
            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
}

Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    static const GrainSize GrainMap_tg0[] =
      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_tg1[] =
      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    statWalkWaitTime.sample(curTick() - currState->startTime);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    switch (currState->el) {
      case EL0:
      case EL1:
        if (isStage2) {
            DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];
            // ARM DDI 0487A.f D7-2148
            // The starting level of stage 2 translation depends on
            // VTCR_EL2.SL0 and VTCR_EL2.TG0
            LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
            uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
            static const LookupLevel SLL[] = {
                L2, L3, L3, __, // sl0 == 0
                L1, L2, L2, __, // sl0 == 1, etc.
                L0, L1, L1, __,
                __, __, __, __
            };
            start_lookup_level = SLL[sl_tg];
            panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                     "Cannot discern lookup level from vtcr.{sl0,tg0}");
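            // Example: SL0 == 1 with TG0 == 0 (4 KB granule) indexes entry 4
            // of SLL above and starts the stage-2 walk at L1.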
            ps = currState->vtcr.ps;
        } else {
            switch (bits(currState->vaddr, 63,48)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                currState->hpd = currState->tcr.hpd0;
                if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                    currState->tcr.epd0)
                  fault = true;
                break;
              case 0xffff:
                DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
                tg = GrainMap_tg1[currState->tcr.tg1];
                currState->hpd = currState->tcr.hpd1;
                if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                    currState->tcr.epd1)
                  fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case EL2:
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            currState->hpd = currState->hcr.e2h ?
                currState->tcr.hpd0 : currState->tcr.hpd;
            break;

          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_tg1[currState->tcr.tg1];
            currState->hpd = currState->tcr.hpd1;
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->tcr.epd1 || !currState->hcr.e2h)
              fault = true;
            break;

           default:
              // invalid addr if top two bytes are not all 0s or all 1s
              fault = true;
        }
        ps = currState->tcr.ps;
        break;
      case EL3:
        switch(bits(currState->vaddr, 63,48)) {
            case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                currState->hpd = currState->tcr.hpd;
                break;
            default:
                // invalid addr if top two bytes are not all 0s
                fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    if (fault) {
        Fault f;
        if (currState->isFetch)
            f =  std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L0, isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::TranslationLL + L0,
                isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A.  These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
    if (start_lookup_level == MAX_LOOKUP_LEVELS) {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0, 0 } },
            { Grain16KB, { 47, 36, 25, 0 } },
            { Grain64KB, { 64, 42, 29, 0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }
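    // Example: a 4 KB granule with a 39-bit region (tsz == 39) fails the L0
    // cutoff (39 > 39 is false) but passes the L1 cutoff (39 > 30), so the
    // walk starts at L1 and uses three levels of lookup.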

    int stride = tg - 3;

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);
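    // Continuing the example above (4 KB granule: tg == 12, stride == 9,
    // tsz == 39, start at L1): base_addr_lo = 3 + 39 - 9 * 2 - 12 = 12, so
    // the table base is TTBR[47:12].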

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

   }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);
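    // In the same example the index bits are VA[38:30] << 3, selecting one
    // of 512 eight-byte entries in the starting-level table.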

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
                        sizeof(uint64_t), flag, start_lookup_level,
                        LongDescEventByLevel[start_lookup_level], NULL);
    } else {
        fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                        sizeof(uint64_t), flag, -1, NULL,
                        &TableWalker::doLongDescriptor);
        f = currState->fault;
    }

    return f;
}

void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: the tc and sctlr arguments are hiding the tc and sctlr class
    // members
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
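        // Example: attrIndx == 5 selects MAIR1 and extracts its second byte,
        // i.e. attr = (MAIR1 >> 8) & 0xff.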
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR
        // register, providing the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
                             LongDescriptor &lDescriptor)
{
    uint8_t attr;
    uint8_t attr_hi;
    uint8_t attr_lo;
    uint8_t sh = lDescriptor.sh();

    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_hi = (attr >> 2) & 0x3;
        uint8_t attr_lo =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_hi == 0) {
            te.mtype        = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_lo == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_hi == 1 ? 0 :
                              attr_hi == 2 ? 2 : 1;
            te.innerAttrs   = attr_lo == 1 ? 0 :
                              attr_lo == 2 ? 6 : 5;
            // Treat write-through memory as uncacheable, this is safe
            // but for performance reasons not optimal.
            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                (attr_lo == 1) || (attr_lo == 2);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);

        // Select MAIR
        uint64_t mair;
        switch (currState->el) {
          case EL0:
          case EL1:
            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
            break;
          case EL2:
            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
            break;
          case EL3:
            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }

        // Select attributes
        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        attr_lo = bits(attr, 3, 0);
        attr_hi = bits(attr, 7, 4);
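        // Example: attrIndx == 2 selects MAIR_ELx bits [23:16] as the 8-bit
        // attribute field.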

        // Memory type
        te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;

        // Cacheability
        te.nonCacheable = false;
        if (te.mtype == TlbEntry::MemoryType::Device) {  // Device memory
            te.nonCacheable = true;
        }
        // Treat write-through memory as uncacheable, this is safe
        // but for performance reasons not optimal.
        switch (attr_hi) {
          case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
          case 0x4:         // Normal memory, Outer Non-cacheable
          case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
            te.nonCacheable = true;
        }
        switch (attr_lo) {
          case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
          case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
            warn_if(!attr_hi, "Unpredictable behavior");
            M5_FALLTHROUGH;
          case 0x4:         // Device-nGnRE memory or
                            // Normal memory, Inner Non-cacheable
          case 0x8:         // Device-nGRE memory or
                            // Normal memory, Inner Write-through non-transient
            te.nonCacheable = true;
        }

        te.shareable       = sh == 2;
        te.outerShareable = (sh & 0x2) ? true : false;
        // Attributes formatted according to the 64-bit PAR
        te.attributes = ((uint64_t) attr << 56) |
            (1 << 11) |     // LPAE bit
            (te.ns << 9) |  // NS bit
            (sh << 7);
    }
}
1446
1447void
1448TableWalker::doL1Descriptor()
1449{
1450    if (currState->fault != NoFault) {
1451        return;
1452    }
1453
1454    currState->l1Desc.data = htog(currState->l1Desc.data,
1455                                  byteOrder(currState->tc));
1456
1457    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1458            currState->vaddr_tainted, currState->l1Desc.data);
1459    TlbEntry te;
1460
1461    switch (currState->l1Desc.type()) {
1462      case L1Descriptor::Ignore:
1463      case L1Descriptor::Reserved:
1464        if (!currState->timing) {
1465            currState->tc = NULL;
1466            currState->req = NULL;
1467        }
1468        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1469        if (currState->isFetch)
1470            currState->fault =
1471                std::make_shared<PrefetchAbort>(
1472                    currState->vaddr_tainted,
1473                    ArmFault::TranslationLL + L1,
1474                    isStage2,
1475                    ArmFault::VmsaTran);
1476        else
1477            currState->fault =
1478                std::make_shared<DataAbort>(
1479                    currState->vaddr_tainted,
1480                    TlbEntry::DomainType::NoAccess,
1481                    currState->isWrite,
1482                    ArmFault::TranslationLL + L1, isStage2,
1483                    ArmFault::VmsaTran);
1484        return;
1485      case L1Descriptor::Section:
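            // A short-descriptor section maps a full 1 MiB region straight
            // from this L1 entry; supersections (16 MiB) are not implemented
            // (see the panic below).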
1486        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1487            /** @todo: check sctlr.ha (bit[17]); if hardware management of
1488              * the Access Flag is enabled, call l1Desc.setAp0() instead of
1489              * generating an access flag fault.
1490              */
1491
1492            currState->fault = std::make_shared<DataAbort>(
1493                currState->vaddr_tainted,
1494                currState->l1Desc.domain(),
1495                currState->isWrite,
1496                ArmFault::AccessFlagLL + L1,
1497                isStage2,
1498                ArmFault::VmsaTran);
1499        }
1500        if (currState->l1Desc.supersection()) {
1501            panic("Haven't implemented supersections\n");
1502        }
1503        insertTableEntry(currState->l1Desc, false);
1504        return;
1505      case L1Descriptor::PageTable:
1506        {
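                // VA[19:12] indexes the 256-entry L2 table; each entry is
                // 4 bytes wide, hence the shift by 2 to form a byte offset
                // from the table base returned by l2Addr().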
1507            Addr l2desc_addr;
1508            l2desc_addr = currState->l1Desc.l2Addr() |
1509                (bits(currState->vaddr, 19, 12) << 2);
1510            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1511                    l2desc_addr, currState->isSecure ? "s" : "ns");
1512
1513            // Trickbox address check
1514            currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1515                                        currState->l1Desc.domain(), L2);
1516
1517            if (currState->fault) {
1518                if (!currState->timing) {
1519                    currState->tc = NULL;
1520                    currState->req = NULL;
1521                }
1522                return;
1523            }
1524
1525            Request::Flags flag = Request::PT_WALK;
1526            if (currState->isSecure)
1527                flag.set(Request::SECURE);
1528
1529            bool delayed;
1530            delayed = fetchDescriptor(l2desc_addr,
1531                                      (uint8_t*)&currState->l2Desc.data,
1532                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
1533                                      &TableWalker::doL2Descriptor);
1534            if (delayed) {
1535                currState->delayed = true;
1536            }
1537
1538            return;
1539        }
1540      default:
1541        panic("A new type in a 2 bit field?\n");
1542    }
1543}
1544
1545Fault
1546TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1547{
1548    if (currState->isFetch) {
1549        return std::make_shared<PrefetchAbort>(
1550            currState->vaddr_tainted,
1551            src + currState->longDesc.lookupLevel,
1552            isStage2,
1553            ArmFault::LpaeTran);
1554    } else {
1555        return std::make_shared<DataAbort>(
1556            currState->vaddr_tainted,
1557            TlbEntry::DomainType::NoAccess,
1558            currState->isWrite,
1559            src + currState->longDesc.lookupLevel,
1560            isStage2,
1561            ArmFault::LpaeTran);
1562    }
1563}
1564
1565void
1566TableWalker::doLongDescriptor()
1567{
1568    if (currState->fault != NoFault) {
1569        return;
1570    }
1571
1572    currState->longDesc.data = htog(currState->longDesc.data,
1573                                    byteOrder(currState->tc));
1574
1575    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1576            currState->longDesc.lookupLevel, currState->vaddr_tainted,
1577            currState->longDesc.data,
1578            currState->aarch64 ? "AArch64" : "long-desc.");
1579
1580    if ((currState->longDesc.type() == LongDescriptor::Block) ||
1581        (currState->longDesc.type() == LongDescriptor::Page)) {
1582        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1583                "xn: %d, ap: %d, af: %d, type: %d\n",
1584                currState->longDesc.lookupLevel,
1585                currState->longDesc.data,
1586                currState->longDesc.pxn(),
1587                currState->longDesc.xn(),
1588                currState->longDesc.ap(),
1589                currState->longDesc.af(),
1590                currState->longDesc.type());
1591    } else {
1592        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1593                currState->longDesc.lookupLevel,
1594                currState->longDesc.data,
1595                currState->longDesc.type());
1596    }
1597
1598    TlbEntry te;
1599
1600    switch (currState->longDesc.type()) {
1601      case LongDescriptor::Invalid:
1602        if (!currState->timing) {
1603            currState->tc = NULL;
1604            currState->req = NULL;
1605        }
1606
1607        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1608                currState->longDesc.lookupLevel,
1609                ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1610
1611        currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1612        return;
1613
1614      case LongDescriptor::Block:
1615      case LongDescriptor::Page:
1616        {
1617            auto fault_source = ArmFault::FaultSourceInvalid;
1618            // Check for address size fault
1619            if (checkAddrSizeFaultAArch64(
1620                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1621                          currState->longDesc.offsetBits()),
1622                    currState->physAddrRange)) {
1623
1624                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1625                        currState->longDesc.lookupLevel);
1626                fault_source = ArmFault::AddressSizeLL;
1627
1628            // Check for access fault
1629            } else if (currState->longDesc.af() == 0) {
1630
1631                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1632                        currState->longDesc.lookupLevel);
1633                fault_source = ArmFault::AccessFlagLL;
1634            }
1635
1636            if (fault_source != ArmFault::FaultSourceInvalid) {
1637                currState->fault = generateLongDescFault(fault_source);
1638            } else {
1639                insertTableEntry(currState->longDesc, true);
1640            }
1641        }
1642        return;
1643      case LongDescriptor::Table:
1644        {
1645            // Set hierarchical permission flags
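                // Table descriptors carry APTable/XNTable/PXNTable bits that
                // constrain every level below them, unless hierarchical
                // permission disable (hpd) is in effect. The combination is
                // one-way: rwTable/userTable can only be cleared and
                // xnTable/pxnTable can only be set as the walk descends.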
1646            currState->secureLookup = currState->secureLookup &&
1647                currState->longDesc.secureTable();
1648            currState->rwTable = currState->rwTable &&
1649                (currState->longDesc.rwTable() || currState->hpd);
1650            currState->userTable = currState->userTable &&
1651                (currState->longDesc.userTable() || currState->hpd);
1652            currState->xnTable = currState->xnTable ||
1653                (currState->longDesc.xnTable() && !currState->hpd);
1654            currState->pxnTable = currState->pxnTable ||
1655                (currState->longDesc.pxnTable() && !currState->hpd);
1656
1657            // Set up next level lookup
1658            Addr next_desc_addr = currState->longDesc.nextDescAddr(
1659                currState->vaddr);
1660
1661            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1662                    currState->longDesc.lookupLevel,
1663                    currState->longDesc.lookupLevel + 1,
1664                    next_desc_addr,
1665                    currState->secureLookup ? "s" : "ns");
1666
1667            // Check for address size fault
1668            if (currState->aarch64 && checkAddrSizeFaultAArch64(
1669                    next_desc_addr, currState->physAddrRange)) {
1670                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1671                        currState->longDesc.lookupLevel);
1672
1673                currState->fault = generateLongDescFault(
1674                    ArmFault::AddressSizeLL);
1675                return;
1676            }
1677
1678            // Trickbox address check
1679            currState->fault = testWalk(
1680                next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1681                toLookupLevel(currState->longDesc.lookupLevel + 1));
1682
1683            if (currState->fault) {
1684                if (!currState->timing) {
1685                    currState->tc = NULL;
1686                    currState->req = NULL;
1687                }
1688                return;
1689            }
1690
1691            Request::Flags flag = Request::PT_WALK;
1692            if (currState->secureLookup)
1693                flag.set(Request::SECURE);
1694
1695            LookupLevel L = currState->longDesc.lookupLevel =
1696                (LookupLevel) (currState->longDesc.lookupLevel + 1);
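                // Only an AArch64 walk can start at L0, so a next level of L1
                // implies AArch64 (asserted below); AArch32 long-descriptor
                // walks start at L1 or L2 and so continue here at L2 or L3.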
1697            Event *event = NULL;
1698            switch (L) {
1699              case L1:
1700                assert(currState->aarch64);
                    M5_FALLTHROUGH;
1701              case L2:
1702              case L3:
1703                event = LongDescEventByLevel[L];
1704                break;
1705              default:
1706                panic("Wrong lookup level in table walk\n");
1707                break;
1708            }
1709
1710            bool delayed;
1711            delayed = fetchDescriptor(next_desc_addr,
                                          (uint8_t*)&currState->longDesc.data,
1712                                      sizeof(uint64_t), flag, -1, event,
1713                                      &TableWalker::doLongDescriptor);
1714            if (delayed) {
1715                 currState->delayed = true;
1716            }
1717        }
1718        return;
1719      default:
1720        panic("A new type in a 2 bit field?\n");
1721    }
1722}
1723
1724void
1725TableWalker::doL2Descriptor()
1726{
1727    if (currState->fault != NoFault) {
1728        return;
1729    }
1730
1731    currState->l2Desc.data = htog(currState->l2Desc.data,
1732                                  byteOrder(currState->tc));
1733
1734    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1735            currState->vaddr_tainted, currState->l2Desc.data);
1736    TlbEntry te;
1737
1738    if (currState->l2Desc.invalid()) {
1739        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1740        if (!currState->timing) {
1741            currState->tc = NULL;
1742            currState->req = NULL;
1743        }
1744        if (currState->isFetch)
1745            currState->fault = std::make_shared<PrefetchAbort>(
1746                    currState->vaddr_tainted,
1747                    ArmFault::TranslationLL + L2,
1748                    isStage2,
1749                    ArmFault::VmsaTran);
1750        else
1751            currState->fault = std::make_shared<DataAbort>(
1752                currState->vaddr_tainted, currState->l1Desc.domain(),
1753                currState->isWrite, ArmFault::TranslationLL + L2,
1754                isStage2,
1755                ArmFault::VmsaTran);
1756        return;
1757    }
1758
1759    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1760        /** @todo: check sctlr.ha (bit[17]); if hardware management of the
1761          * Access Flag is enabled, call l2Desc.setAp0() instead of
1762          * generating an access flag fault. */
1763        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1764                currState->sctlr.afe, currState->l2Desc.ap());
1765
1766        currState->fault = std::make_shared<DataAbort>(
1767            currState->vaddr_tainted,
1768            TlbEntry::DomainType::NoAccess, currState->isWrite,
1769            ArmFault::AccessFlagLL + L2, isStage2,
1770            ArmFault::VmsaTran);
1771    }
1772
1773    insertTableEntry(currState->l2Desc, false);
1774}
1775
1776void
1777TableWalker::doL1DescriptorWrapper()
1778{
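        // Timing-mode callback for a completed L1 descriptor read: walks are
        // serviced in FIFO order, so the state at the front of the L1 queue
        // belongs to the descriptor access that has just finished.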
1779    currState = stateQueues[L1].front();
1780    currState->delayed = false;
1781    // if there's a stage2 translation object we don't need it any more
1782    if (currState->stage2Tran) {
1783        delete currState->stage2Tran;
1784        currState->stage2Tran = NULL;
1785    }
1786
1787
1788    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",
                &currState->l1Desc.data);
1789    DPRINTF(TLBVerbose, "L1 Desc object      data: %08x\n",
                currState->l1Desc.data);
1790
1791    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n",
                currState->vaddr_tainted);
1792    doL1Descriptor();
1793
1794    stateQueues[L1].pop_front();
1795    // Check if fault was generated
1796    if (currState->fault != NoFault) {
1797        currState->transState->finish(currState->fault, currState->req,
1798                                      currState->tc, currState->mode);
1799        statWalksShortTerminatedAtLevel[0]++;
1800
1801        pending = false;
1802        nextWalk(currState->tc);
1803
1804        currState->req = NULL;
1805        currState->tc = NULL;
1806        currState->delayed = false;
1807        delete currState;
1808    }
1809    else if (!currState->delayed) {
1810        // delay is not set so there is no L2 to do
1811        // Don't finish the translation if a stage 2 look up is underway
1812        statWalkServiceTime.sample(curTick() - currState->startTime);
1813        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1814        tlb->translateTiming(currState->req, currState->tc,
1815                             currState->transState, currState->mode);
1816        statWalksShortTerminatedAtLevel[0]++;
1817
1818        pending = false;
1819        nextWalk(currState->tc);
1820
1821        currState->req = NULL;
1822        currState->tc = NULL;
1823        currState->delayed = false;
1824        delete currState;
1825    } else {
1826        // need to do L2 descriptor
1827        stateQueues[L2].push_back(currState);
1828    }
1829    currState = NULL;
1830}
1831
1832void
1833TableWalker::doL2DescriptorWrapper()
1834{
1835    currState = stateQueues[L2].front();
1836    assert(currState->delayed);
1837    // if there's a stage2 translation object we don't need it any more
1838    if (currState->stage2Tran) {
1839        delete currState->stage2Tran;
1840        currState->stage2Tran = NULL;
1841    }
1842
1843    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1844            currState->vaddr_tainted);
1845    doL2Descriptor();
1846
1847    // Check if fault was generated
1848    if (currState->fault != NoFault) {
1849        currState->transState->finish(currState->fault, currState->req,
1850                                      currState->tc, currState->mode);
1851        statWalksShortTerminatedAtLevel[1]++;
1852    } else {
1853        statWalkServiceTime.sample(curTick() - currState->startTime);
1854        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1855        tlb->translateTiming(currState->req, currState->tc,
1856                             currState->transState, currState->mode);
1857        statWalksShortTerminatedAtLevel[1]++;
1858    }
1859
1860
1861    stateQueues[L2].pop_front();
1862    pending = false;
1863    nextWalk(currState->tc);
1864
1865    currState->req = NULL;
1866    currState->tc = NULL;
1867    currState->delayed = false;
1868
1869    delete currState;
1870    currState = NULL;
1871}
1872
1873void
1874TableWalker::doL0LongDescriptorWrapper()
1875{
1876    doLongDescriptorWrapper(L0);
1877}
1878
1879void
1880TableWalker::doL1LongDescriptorWrapper()
1881{
1882    doLongDescriptorWrapper(L1);
1883}
1884
1885void
1886TableWalker::doL2LongDescriptorWrapper()
1887{
1888    doLongDescriptorWrapper(L2);
1889}
1890
1891void
1892TableWalker::doL3LongDescriptorWrapper()
1893{
1894    doLongDescriptorWrapper(L3);
1895}
1896
1897void
1898TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1899{
1900    currState = stateQueues[curr_lookup_level].front();
1901    assert(curr_lookup_level == currState->longDesc.lookupLevel);
1902    currState->delayed = false;
1903
1904    // if there's a stage2 translation object we don't need it any more
1905    if (currState->stage2Tran) {
1906        delete currState->stage2Tran;
1907        currState->stage2Tran = NULL;
1908    }
1909
1910    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1911            currState->vaddr_tainted);
1912    doLongDescriptor();
1913
1914    stateQueues[curr_lookup_level].pop_front();
1915
1916    if (currState->fault != NoFault) {
1917        // A fault was generated
1918        currState->transState->finish(currState->fault, currState->req,
1919                                      currState->tc, currState->mode);
1920
1921        pending = false;
1922        nextWalk(currState->tc);
1923
1924        currState->req = NULL;
1925        currState->tc = NULL;
1926        currState->delayed = false;
1927        delete currState;
1928    } else if (!currState->delayed) {
1929        // No additional lookups required
1930        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1931        statWalkServiceTime.sample(curTick() - currState->startTime);
1932        tlb->translateTiming(currState->req, currState->tc,
1933                             currState->transState, currState->mode);
1934        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1935
1936        pending = false;
1937        nextWalk(currState->tc);
1938
1939        currState->req = NULL;
1940        currState->tc = NULL;
1941        currState->delayed = false;
1942        delete currState;
1943    } else {
1944        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1945            panic("Max. number of lookups already reached in table walk\n");
1946        // Need to perform additional lookups
1947        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1948    }
1949    currState = NULL;
1950}
1951
1952
1953void
1954TableWalker::nextWalk(ThreadContext *tc)
1955{
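        // Either pick up the next queued walk on the following clock edge
        // or, if nothing is pending, allow an in-progress drain to finish.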
1956    if (pendingQueue.size())
1957        schedule(doProcessEvent, clockEdge(Cycles(1)));
1958    else
1959        completeDrain();
1960}
1961
1962bool
1963TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1964    Request::Flags flags, int queueIndex, Event *event,
1965    void (TableWalker::*doDescriptor)())
1966{
1967    bool isTiming = currState->timing;
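        // The return value is true when the read completes asynchronously
        // (timing mode, finished later by the scheduled event) and false
        // when the descriptor has already been handled inline.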
1968
1969    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
1970            descAddr, currState->stage2Req);
1971
1972    // If this translation has a stage 2 then we know descAddr is an IPA and
1973    // needs to be translated before we can access the page table. Do that
1974    // check here.
1975    if (currState->stage2Req) {
1976        Fault fault;
1977        flags = flags | TLB::MustBeOne;
1978
1979        if (isTiming) {
1980            Stage2MMU::Stage2Translation *tran = new
1981                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1982                                             currState->vaddr);
1983            currState->stage2Tran = tran;
1984            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1985                                     flags);
1986            fault = tran->fault;
1987        } else {
1988            fault = stage2Mmu->readDataUntimed(currState->tc,
1989                currState->vaddr, descAddr, data, numBytes, flags,
1990                currState->functional);
1991        }
1992
1993        if (fault != NoFault) {
1994            currState->fault = fault;
1995        }
1996        if (isTiming) {
1997            if (queueIndex >= 0) {
1998                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1999                        stateQueues[queueIndex].size());
2000                stateQueues[queueIndex].push_back(currState);
2001                currState = NULL;
2002            }
2003        } else {
2004            (this->*doDescriptor)();
2005        }
2006    } else {
2007        if (isTiming) {
2008            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2009                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2010            if (queueIndex >= 0) {
2011                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2012                        stateQueues[queueIndex].size());
2013                stateQueues[queueIndex].push_back(currState);
2014                currState = NULL;
2015            }
2016        } else if (!currState->functional) {
2017            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2018                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2019            (this->*doDescriptor)();
2020        } else {
2021            RequestPtr req = std::make_shared<Request>(
2022                descAddr, numBytes, flags, masterId);
2023
2024            req->taskId(ContextSwitchTaskId::DMA);
2025            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
2026            pkt->dataStatic(data);
2027            port->sendFunctional(pkt);
2028            (this->*doDescriptor)();
2029            delete pkt;
2030        }
2031    }
2032    return (isTiming);
2033}
2034
2035void
2036TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2037{
2038    TlbEntry te;
2039
2040    // Create and fill a new page table entry
2041    te.valid          = true;
2042    te.longDescFormat = longDescriptor;
2043    te.isHyp          = currState->isHyp;
2044    te.asid           = currState->asid;
2045    te.vmid           = currState->vmid;
2046    te.N              = descriptor.offsetBits();
2047    te.vpn            = currState->vaddr >> te.N;
2048    te.size           = (1<<te.N) - 1;
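        // e.g. a 4 KiB page has N == 12, so size == 0xfff and
        // vpn == vaddr >> 12.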
2049    te.pfn            = descriptor.pfn();
2050    te.domain         = descriptor.domain();
2051    te.lookupLevel    = descriptor.lookupLevel;
2052    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
2053    te.nstid          = !currState->isSecure;
2054    te.xn             = descriptor.xn();
2055    if (currState->aarch64)
2056        te.el         = currState->el;
2057    else
2058        te.el         = EL1;
2059
2060    statPageSizes[pageSizeNtoStatBin(te.N)]++;
2061    statRequestOrigin[COMPLETED][currState->isFetch]++;
2062
2063    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2064    // as global
2065    te.global         = descriptor.global(currState) || isStage2;
2066    if (longDescriptor) {
2067        LongDescriptor lDescriptor =
2068            dynamic_cast<LongDescriptor &>(descriptor);
2069
2070        te.xn |= currState->xnTable;
2071        te.pxn = currState->pxnTable || lDescriptor.pxn();
2072        if (isStage2) {
2073            // This is actually the HAP field, but it is stored in the same
2074            // bit positions as the AP field in a stage 1 translation.
2075            te.hap = lDescriptor.ap();
2076        } else {
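                // Stage 1: merge the leaf AP bits with the accumulated
                // APTable restrictions. The table bits can only make the
                // mapping read-only (bit 1) or remove EL0 access (bit 0),
                // never grant extra permission.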
2077            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2078                (currState->userTable && (descriptor.ap() & 0x1));
2079        }
2080        if (currState->aarch64)
2081            memAttrsAArch64(currState->tc, te, lDescriptor);
2082        else
2083            memAttrsLPAE(currState->tc, te, lDescriptor);
2084    } else {
2085        te.ap = descriptor.ap();
2086        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2087                 descriptor.shareable());
2088    }
2089
2090    // Debug output
2091    DPRINTF(TLB, descriptor.dbgHeader().c_str());
2092    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2093            te.N, te.pfn, te.size, te.global, te.valid);
2094    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2095            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2096            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2097            te.nonCacheable, te.ns);
2098    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2099            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2100            descriptor.getRawData());
2101
2102    // Insert the entry into the TLB
2103    tlb->insert(currState->vaddr, te);
2104    if (!currState->timing) {
2105        currState->tc  = NULL;
2106        currState->req = NULL;
2107    }
2108}
2109
2110ArmISA::TableWalker *
2111ArmTableWalkerParams::create()
2112{
2113    return new ArmISA::TableWalker(this);
2114}
2115
2116LookupLevel
2117TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2118{
2119    switch (lookup_level_as_int) {
2120      case L1:
2121        return L1;
2122      case L2:
2123        return L2;
2124      case L3:
2125        return L3;
2126      default:
2127        panic("Invalid lookup level conversion");
2128    }
2129}
2130
2131/* This method keeps track of the table walker queue's residency, so it
2132 * needs to be called whenever requests start and complete. */
2133void
2134TableWalker::pendingChange()
2135{
2136    unsigned n = pendingQueue.size();
2137    if ((currState != NULL) && (currState != pendingQueue.front())) {
2138        ++n;
2139    }
2140
2141    if (n != pendingReqs) {
2142        Tick now = curTick();
2143        statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2144        pendingReqs = n;
2145        pendingChangeTick = now;
2146    }
2147}
2148
2149Fault
2150TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2151                      LookupLevel lookup_level)
2152{
2153    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2154                         currState->mode, domain, lookup_level);
2155}
2156
2157
2158uint8_t
2159TableWalker::pageSizeNtoStatBin(uint8_t N)
2160{
2161    /* for statPageSizes */
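        // N is log2 of the page/block size in bytes; each case maps to the
        // corresponding bin named via statPageSizes.subname() in regStats().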
2162    switch (N) {
2163        case 12: return 0; // 4K
2164        case 14: return 1; // 16K (using 16K granule in v8-64)
2165        case 16: return 2; // 64K
2166        case 20: return 3; // 1M
2167        case 21: return 4; // 2M-LPAE
2168        case 24: return 5; // 16M
2169        case 25: return 6; // 32M (using 16K granule in v8-64)
2170        case 29: return 7; // 512M (using 64K granule in v8-64)
2171        case 30: return 8; // 1G-LPAE
2172        default:
2173            panic("unknown page size");
2174            return 255;
2175    }
2176}
2177
2178void
2179TableWalker::regStats()
2180{
2181    ClockedObject::regStats();
2182
2183    statWalks
2184        .name(name() + ".walks")
2185        .desc("Table walker walks requested")
2186        ;
2187
2188    statWalksShortDescriptor
2189        .name(name() + ".walksShort")
2190        .desc("Table walker walks initiated with short descriptors")
2191        .flags(Stats::nozero)
2192        ;
2193
2194    statWalksLongDescriptor
2195        .name(name() + ".walksLong")
2196        .desc("Table walker walks initiated with long descriptors")
2197        .flags(Stats::nozero)
2198        ;
2199
2200    statWalksShortTerminatedAtLevel
2201        .init(2)
2202        .name(name() + ".walksShortTerminationLevel")
2203        .desc("Level at which table walker walks "
2204              "with short descriptors terminate")
2205        .flags(Stats::nozero)
2206        ;
2207    statWalksShortTerminatedAtLevel.subname(0, "Level1");
2208    statWalksShortTerminatedAtLevel.subname(1, "Level2");
2209
2210    statWalksLongTerminatedAtLevel
2211        .init(4)
2212        .name(name() + ".walksLongTerminationLevel")
2213        .desc("Level at which table walker walks "
2214              "with long descriptors terminate")
2215        .flags(Stats::nozero)
2216        ;
2217    statWalksLongTerminatedAtLevel.subname(0, "Level0");
2218    statWalksLongTerminatedAtLevel.subname(1, "Level1");
2219    statWalksLongTerminatedAtLevel.subname(2, "Level2");
2220    statWalksLongTerminatedAtLevel.subname(3, "Level3");
2221
2222    statSquashedBefore
2223        .name(name() + ".walksSquashedBefore")
2224        .desc("Table walks squashed before starting")
2225        .flags(Stats::nozero)
2226        ;
2227
2228    statSquashedAfter
2229        .name(name() + ".walksSquashedAfter")
2230        .desc("Table walks squashed after completion")
2231        .flags(Stats::nozero)
2232        ;
2233
2234    statWalkWaitTime
2235        .init(16)
2236        .name(name() + ".walkWaitTime")
2237        .desc("Table walker wait (enqueue to first request) latency")
2238        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2239        ;
2240
2241    statWalkServiceTime
2242        .init(16)
2243        .name(name() + ".walkCompletionTime")
2244        .desc("Table walker service (enqueue to completion) latency")
2245        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2246        ;
2247
2248    statPendingWalks
2249        .init(16)
2250        .name(name() + ".walksPending")
2251        .desc("Table walker pending requests distribution")
2252        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
2253        ;
2254
2255    statPageSizes // see DDI 0487A D4-1661
2256        .init(9)
2257        .name(name() + ".walkPageSizes")
2258        .desc("Table walker page sizes translated")
2259        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
2260        ;
2261    statPageSizes.subname(0, "4K");
2262    statPageSizes.subname(1, "16K");
2263    statPageSizes.subname(2, "64K");
2264    statPageSizes.subname(3, "1M");
2265    statPageSizes.subname(4, "2M");
2266    statPageSizes.subname(5, "16M");
2267    statPageSizes.subname(6, "32M");
2268    statPageSizes.subname(7, "512M");
2269    statPageSizes.subname(8, "1G");
2270
2271    statRequestOrigin
2272        .init(2,2) // Instruction/Data, requests/completed
2273        .name(name() + ".walkRequestOrigin")
2274        .desc("Table walker requests started/completed, data/inst")
2275        .flags(Stats::total)
2276        ;
2277    statRequestOrigin.subname(0,"Requested");
2278    statRequestOrigin.subname(1,"Completed");
2279    statRequestOrigin.ysubname(0,"Data");
2280    statRequestOrigin.ysubname(1,"Inst");
2281}
2282