/*
 * Copyright (c) 2010, 2012-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */

#include "arch/arm/table_walker.hh"

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "dev/dma_device.hh"
#include "sim/system.hh"

using namespace ArmISA;

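// Construct the walker and, in full-system mode, cache system-wide
// properties (Security Extensions, LPAE, Virtualization, large ASIDs,
// physical address range) from the owning ArmSystem so they do not have
// to be queried on every walk.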
TableWalker::TableWalker(const Params *p)
    : ClockedObject(p),
      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
      isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p->num_squash_per_cycle),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name())
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}

TableWalker::~TableWalker()
{
    ;
}

void
TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    port = &m->getDMAPort();
    masterId = master_id;
}

void
TableWalker::init()
{
    fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
    fatal_if(!port, "Table walker must have a valid port\n");
    fatal_if(!tlb, "Table walker must have a valid TLB\n");
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        if (!isStage2) {
            return *port;
        } else {
            fatal("Cannot access table walker port through stage-two walker\n");
        }
    }
    return ClockedObject::getPort(if_name, idx);
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
    asid(0), vmid(0), isHyp(false), transState(nullptr),
    vaddr(0), vaddr_tainted(0),
    sctlr(0), scr(0), cpsr(0), tcr(0),
    htcr(0), hcr(0), vtcr(0),
    isWrite(false), isFetch(false), isSecure(false),
    secureLookup(false), rwTable(false), userTable(false), xnTable(false),
    pxnTable(false), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

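// Draining support: the walker only reports Drained once no walk is in
// flight, i.e. all per-level state queues and the pending queue are empty.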
void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[L0].empty() && stateQueues[L1].empty() &&
        stateQueues[L2].empty() && stateQueues[L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params()->sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

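// Entry point for a translation table walk. Sets up (or reuses) a
// WalkerState from the request and the current translation regime, then
// either performs the walk immediately (atomic/functional mode) or queues
// it behind any walks already in progress (timing mode).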
Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType,
                  bool _stage2Req)
{
    assert(!(_functional && _timing));
    ++statWalks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // In atomic mode, a WalkerState instance is only created once per
        // TLB. In timing mode, a new instance is generated for every TLB
        // miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen, or we'll use
        // this fault to re-execute the faulting instruction, which should
        // clean everything up.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++statSquashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
    // aarch32/translation/translation/AArch32.TranslateAddress dictates
    // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
    if (isStage2) {
        currState->el = EL1;
        currState->aarch64 = ELIs64(_tc, EL2);
    } else {
        currState->el =
            TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
        currState->aarch64 =
            ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
    }
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB; all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
        } else switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case EL2:
            assert(_haveVirtualization);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr  = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr   = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr  = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    statRequestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

    bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;

        ++statWalksLongDescriptor;
    } else {
        ++statWalksShortDescriptor;
    }

    if (!currState->timing) {
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access, restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
    } else {
        pending = true;
        pendingChange();
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}

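// Event handler for queued (timing-mode) walks: pops the next pending walk,
// skips it if the requesting instruction was squashed or a previous walk
// already filled the TLB, and otherwise starts the appropriate
// short-descriptor, LPAE or AArch64 walk.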
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            currState->el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, let's process it
        pending = true;
        pendingQueue.pop_front();
        // Keep currState in case one of the processWalk... calls NULLs it
        WalkerState *curr_state_copy = currState;
        Fault f;
        if (currState->aarch64)
            f = processWalkAArch64();
        else if (longDescFormatInUse(currState->tc) ||
                 currState->isHyp || isStage2)
            f = processWalkLPAE();
        else
            f = processWalk();

        if (f != NoFault) {
            curr_state_copy->transState->finish(f, curr_state_copy->req,
                    curr_state_copy->tc, curr_state_copy->mode);

            delete curr_state_copy;
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed, we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;
        statSquashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                      currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation, which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            statWalkServiceTime.sample(curTick() - currState->startTime);
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, currState->el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

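// Short-descriptor (ARMv7, non-LPAE) walk: select TTBR0 or TTBR1 based on
// TTBCR.N and the upper VA bits, then fetch the level-1 descriptor.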
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    statWalkWaitTime.sample(curTick() - currState->startTime);

    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        currState->ttbcr.n = 0;
    }

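    // The L1 descriptor address combines the table base from the TTBR with
    // bits [31-N:20] of the VA as a word index. As an illustration with
    // hypothetical values: for N = 0, TTBR = 0x80004000 and
    // vaddr = 0x80401234, the index is vaddr[31:20] = 0x804, so the
    // descriptor sits at 0x80004000 | (0x804 << 2) = 0x80006010.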
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = testWalk(l1desc_addr, sizeof(uint32_t),
                 TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}

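// Long-descriptor (LPAE) walk for AArch32: picks VTTBR (stage 2), HTTBR
// (Hyp) or TTBR0/TTBR1 based on the TTBCR.TxSZ region boundaries, then
// starts the lookup at L1 or L2 as permitted by the region size.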
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    statWalkWaitTime.sample(curTick() - currState->startTime);

    Request::Flags flag = Request::PT_WALK;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use; if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite, ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   LongDescEventByLevel[start_lookup_level],
                                   &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

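// Clamp the AArch64 input address size (64 - TxSZ) to the architecturally
// supported range of [25, 48] bits.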
unsigned
TableWalker::adjustTableSizeAArch64(unsigned tsz)
{
    if (tsz < 25)
        return 25;
    if (tsz > 48)
        return 48;
    return tsz;
}

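// An output address raises an Address Size Fault if it uses any bits above
// the configured physical address range.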
bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
{
    return (currPhysAddrRange != MaxPhysAddrRange &&
            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
}

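// AArch64 walk: derive the TTBR, input address size, granule size and
// starting lookup level from TCR_ELx/VTCR_EL2, validate the top address
// bits, then fetch the first long-format descriptor.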
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    static const GrainSize GrainMap_tg0[] =
      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_tg1[] =
      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    statWalkWaitTime.sample(curTick() - currState->startTime);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    switch (currState->el) {
      case EL0:
      case EL1:
        if (isStage2) {
            DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];
            // ARM DDI 0487A.f D7-2148
            // The starting level of stage 2 translation depends on
            // VTCR_EL2.SL0 and VTCR_EL2.TG0
            LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
            uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
            static const LookupLevel SLL[] = {
                L2, L3, L3, __, // sl0 == 0
                L1, L2, L2, __, // sl0 == 1, etc.
                L0, L1, L1, __,
                __, __, __, __
            };
            start_lookup_level = SLL[sl_tg];
            panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                     "Cannot discern lookup level from vtcr.{sl0,tg0}");
            ps = currState->vtcr.ps;
        } else {
            switch (bits(currState->vaddr, 63,48)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                    currState->tcr.epd0)
                  fault = true;
                break;
              case 0xffff:
                DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
                tg = GrainMap_tg1[currState->tcr.tg1];
                if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                    currState->tcr.epd1)
                  fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case EL2:
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            break;

          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_tg1[currState->tcr.tg1];
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->tcr.epd1 || !currState->hcr.e2h)
              fault = true;
            break;

           default:
              // top two bytes must be all 0s or all 1s, else invalid addr
              fault = true;
        }
        ps = currState->tcr.ps;
        break;
      case EL3:
        switch(bits(currState->vaddr, 63,48)) {
            case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                break;
            default:
                // invalid addr if top two bytes are not all 0s
                fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    if (fault) {
        Fault f;
        if (currState->isFetch)
            f =  std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L0, isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::TranslationLL + L0,
                isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A.  These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
    if (start_lookup_level == MAX_LOOKUP_LEVELS) {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0, 0 } },
            { Grain16KB, { 47, 36, 25, 0 } },
            { Grain64KB, { 64, 42, 29, 0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }

    int stride = tg - 3;

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

   }

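    // Worked example with illustrative (hypothetical) values: for a 4KB
    // granule, tg = 12 and stride = 9; with tsz = 39 the walk starts at L1,
    // so base_addr above is ttbr[47:12] and the index below selects
    // vaddr[38:30], shifted left by 3 for 8-byte descriptors.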
    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
                        sizeof(uint64_t), flag, start_lookup_level,
                        LongDescEventByLevel[start_lookup_level], NULL);
    } else {
        fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                        sizeof(uint64_t), flag, -1, NULL,
                        &TableWalker::doLongDescriptor);
        f = currState->fault;
    }

    return f;
}

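// Fill in the memory attributes of a short-descriptor TLB entry from the
// TEX/C/B bits and the S bit, either directly or, when TEX remap is in
// effect, through the PRRR/NMRR remap registers.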
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: the tc and sctlr arguments hide the tc and sctlr class
    // members
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

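// Fill in the memory attributes of a long-descriptor (LPAE) TLB entry:
// stage 2 entries carry their attributes in the descriptor's MemAttr
// field, while stage 1 entries index into MAIR0/MAIR1 via AttrIndx.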
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 =  attr       & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR
        // register and provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

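// Fill in the memory attributes of an AArch64 TLB entry: stage 2 entries
// use the descriptor's MemAttr field, stage 1 entries index into MAIR_ELx
// via AttrIndx; for stage 1 the result is also encoded in 64-bit PAR format.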
1342void
1343TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1344                             LongDescriptor &lDescriptor)
1345{
1346    uint8_t attr;
1347    uint8_t attr_hi;
1348    uint8_t attr_lo;
1349    uint8_t sh = lDescriptor.sh();
1350
1351    if (isStage2) {
1352        attr = lDescriptor.memAttr();
1353        uint8_t attr_hi = (attr >> 2) & 0x3;
1354        uint8_t attr_lo =  attr       & 0x3;
1355
1356        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1357
1358        if (attr_hi == 0) {
1359            te.mtype        = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1360                                            : TlbEntry::MemoryType::Device;
1361            te.outerAttrs   = 0;
1362            te.innerAttrs   = attr_lo == 0 ? 1 : 3;
1363            te.nonCacheable = true;
1364        } else {
1365            te.mtype        = TlbEntry::MemoryType::Normal;
1366            te.outerAttrs   = attr_hi == 1 ? 0 :
1367                              attr_hi == 2 ? 2 : 1;
1368            te.innerAttrs   = attr_lo == 1 ? 0 :
1369                              attr_lo == 2 ? 6 : 5;
1370            // Treat write-through memory as uncacheable, this is safe
1371            // but for performance reasons not optimal.
1372            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1373                (attr_lo == 1) || (attr_lo == 2);
1374        }
1375    } else {
1376        uint8_t attrIndx = lDescriptor.attrIndx();
1377
1378        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1379
1380        // Select MAIR
1381        uint64_t mair;
1382        switch (currState->el) {
1383          case EL0:
1384          case EL1:
1385            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1386            break;
1387          case EL2:
1388            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1389            break;
1390          case EL3:
1391            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1392            break;
1393          default:
1394            panic("Invalid exception level");
1395            break;
1396        }
1397
1398        // Select attributes
1399        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1400        attr_lo = bits(attr, 3, 0);
1401        attr_hi = bits(attr, 7, 4);
1402
1403        // Memory type
1404        te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1405
1406        // Cacheability
1407        te.nonCacheable = false;
1408        if (te.mtype == TlbEntry::MemoryType::Device) {  // Device memory
1409            te.nonCacheable = true;
1410        }
1411        // Treat write-through memory as uncacheable, this is safe
1412        // but for performance reasons not optimal.
1413        switch (attr_hi) {
1414          case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1415          case 0x4:         // Normal memory, Outer Non-cacheable
1416          case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1417            te.nonCacheable = true;
1418        }
1419        switch (attr_lo) {
1420          case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1421          case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1422            warn_if(!attr_hi, "Unpredictable behavior");
1423            M5_FALLTHROUGH;
1424          case 0x4:         // Device-nGnRE memory or
1425                            // Normal memory, Inner Non-cacheable
1426          case 0x8:         // Device-nGRE memory or
1427                            // Normal memory, Inner Write-through non-transient
1428            te.nonCacheable = true;
1429        }
1430
1431        te.shareable       = sh == 2;
1432        te.outerShareable = (sh & 0x2) ? true : false;
1433        // Attributes formatted according to the 64-bit PAR
1434        te.attributes = ((uint64_t) attr << 56) |
1435            (1 << 11) |     // LPAE bit
1436            (te.ns << 9) |  // NS bit
1437            (sh << 7);
1438    }
1439}
1440
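// Decode a short-descriptor (VMSAv7) L1 entry: Ignore/Reserved entries
// generate a translation fault, sections are inserted into the TLB
// directly, and page-table entries start the fetch of the corresponding
// L2 descriptor.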
1441void
1442TableWalker::doL1Descriptor()
1443{
1444    if (currState->fault != NoFault) {
1445        return;
1446    }
1447
1448    currState->l1Desc.data = htog(currState->l1Desc.data,
1449                                  byteOrder(currState->tc));
1450
1451    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1452            currState->vaddr_tainted, currState->l1Desc.data);
1453    TlbEntry te;
1454
1455    switch (currState->l1Desc.type()) {
1456      case L1Descriptor::Ignore:
1457      case L1Descriptor::Reserved:
1458        if (!currState->timing) {
1459            currState->tc = NULL;
1460            currState->req = NULL;
1461        }
1462        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1463        if (currState->isFetch)
1464            currState->fault =
1465                std::make_shared<PrefetchAbort>(
1466                    currState->vaddr_tainted,
1467                    ArmFault::TranslationLL + L1,
1468                    isStage2,
1469                    ArmFault::VmsaTran);
1470        else
1471            currState->fault =
1472                std::make_shared<DataAbort>(
1473                    currState->vaddr_tainted,
1474                    TlbEntry::DomainType::NoAccess,
1475                    currState->isWrite,
1476                    ArmFault::TranslationLL + L1, isStage2,
1477                    ArmFault::VmsaTran);
1478        return;
1479      case L1Descriptor::Section:
1480        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1481            /** @todo: check sctlr.ha (bit[17]); if the Hardware Access
1482              * Flag is enabled, set AP[0] in the descriptor (l1Desc.setAp0())
1483              * instead of generating an access flag fault.
1484              */
1485
1486            currState->fault = std::make_shared<DataAbort>(
1487                currState->vaddr_tainted,
1488                currState->l1Desc.domain(),
1489                currState->isWrite,
1490                ArmFault::AccessFlagLL + L1,
1491                isStage2,
1492                ArmFault::VmsaTran);
1493        }
1494        if (currState->l1Desc.supersection()) {
1495            panic("Haven't implemented supersections\n");
1496        }
1497        insertTableEntry(currState->l1Desc, false);
1498        return;
1499      case L1Descriptor::PageTable:
1500        {
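            // The L2 table holds 256 4-byte entries and is indexed with
            // bits [19:12] of the virtual address.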
1501            Addr l2desc_addr;
1502            l2desc_addr = currState->l1Desc.l2Addr() |
1503                (bits(currState->vaddr, 19, 12) << 2);
1504            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1505                    l2desc_addr, currState->isSecure ? "s" : "ns");
1506
1507            // Trickbox address check
1508            currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1509                                        currState->l1Desc.domain(), L2);
1510
1511            if (currState->fault) {
1512                if (!currState->timing) {
1513                    currState->tc = NULL;
1514                    currState->req = NULL;
1515                }
1516                return;
1517            }
1518
1519            Request::Flags flag = Request::PT_WALK;
1520            if (currState->isSecure)
1521                flag.set(Request::SECURE);
1522
1523            bool delayed;
1524            delayed = fetchDescriptor(l2desc_addr,
1525                                      (uint8_t*)&currState->l2Desc.data,
1526                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
1527                                      &TableWalker::doL2Descriptor);
1528            if (delayed) {
1529                currState->delayed = true;
1530            }
1531
1532            return;
1533        }
1534      default:
1535        panic("A new type in a 2 bit field?\n");
1536    }
1537}
1538
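// Decode a long-format (LPAE/AArch64) descriptor: invalid entries
// generate a translation fault, block and page entries are checked for
// address-size and access-flag faults before being inserted into the
// TLB, and table entries accumulate the hierarchical attributes and
// start the next-level fetch.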
1539void
1540TableWalker::doLongDescriptor()
1541{
1542    if (currState->fault != NoFault) {
1543        return;
1544    }
1545
1546    currState->longDesc.data = htog(currState->longDesc.data,
1547                                    byteOrder(currState->tc));
1548
1549    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1550            currState->longDesc.lookupLevel, currState->vaddr_tainted,
1551            currState->longDesc.data,
1552            currState->aarch64 ? "AArch64" : "long-desc.");
1553
1554    if ((currState->longDesc.type() == LongDescriptor::Block) ||
1555        (currState->longDesc.type() == LongDescriptor::Page)) {
1556        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1557                "xn: %d, ap: %d, af: %d, type: %d\n",
1558                currState->longDesc.lookupLevel,
1559                currState->longDesc.data,
1560                currState->longDesc.pxn(),
1561                currState->longDesc.xn(),
1562                currState->longDesc.ap(),
1563                currState->longDesc.af(),
1564                currState->longDesc.type());
1565    } else {
1566        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1567                currState->longDesc.lookupLevel,
1568                currState->longDesc.data,
1569                currState->longDesc.type());
1570    }
1571
1572    TlbEntry te;
1573
1574    switch (currState->longDesc.type()) {
1575      case LongDescriptor::Invalid:
1576        if (!currState->timing) {
1577            currState->tc = NULL;
1578            currState->req = NULL;
1579        }
1580
1581        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1582                currState->longDesc.lookupLevel,
1583                ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1584        if (currState->isFetch)
1585            currState->fault = std::make_shared<PrefetchAbort>(
1586                currState->vaddr_tainted,
1587                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1588                isStage2,
1589                ArmFault::LpaeTran);
1590        else
1591            currState->fault = std::make_shared<DataAbort>(
1592                currState->vaddr_tainted,
1593                TlbEntry::DomainType::NoAccess,
1594                currState->isWrite,
1595                ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1596                isStage2,
1597                ArmFault::LpaeTran);
1598        return;
1599      case LongDescriptor::Block:
1600      case LongDescriptor::Page:
1601        {
1602            bool fault = false;
1603            bool aff = false;
1604            // Check for address size fault
1605            if (checkAddrSizeFaultAArch64(
1606                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1607                          currState->longDesc.offsetBits()),
1608                    currState->physAddrRange)) {
1609                fault = true;
1610                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1611                        currState->longDesc.lookupLevel);
1612            // Check for access fault
1613            } else if (currState->longDesc.af() == 0) {
1614                fault = true;
1615                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1616                        currState->longDesc.lookupLevel);
1617                aff = true;
1618            }
1619            if (fault) {
1620                if (currState->isFetch)
1621                    currState->fault = std::make_shared<PrefetchAbort>(
1622                        currState->vaddr_tainted,
1623                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1624                        currState->longDesc.lookupLevel,
1625                        isStage2,
1626                        ArmFault::LpaeTran);
1627                else
1628                    currState->fault = std::make_shared<DataAbort>(
1629                        currState->vaddr_tainted,
1630                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1631                        (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1632                        currState->longDesc.lookupLevel,
1633                        isStage2,
1634                        ArmFault::LpaeTran);
1635            } else {
1636                insertTableEntry(currState->longDesc, true);
1637            }
1638        }
1639        return;
1640      case LongDescriptor::Table:
1641        {
1642            // Set hierarchical permission flags
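            // Restrictions only accumulate down the walk: a table entry
            // can remove write or user permission and add (P)XN, and a
            // non-secure table forces all further lookups non-secure.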
1643            currState->secureLookup = currState->secureLookup &&
1644                currState->longDesc.secureTable();
1645            currState->rwTable = currState->rwTable &&
1646                currState->longDesc.rwTable();
1647            currState->userTable = currState->userTable &&
1648                currState->longDesc.userTable();
1649            currState->xnTable = currState->xnTable ||
1650                currState->longDesc.xnTable();
1651            currState->pxnTable = currState->pxnTable ||
1652                currState->longDesc.pxnTable();
1653
1654            // Set up next level lookup
1655            Addr next_desc_addr = currState->longDesc.nextDescAddr(
1656                currState->vaddr);
1657
1658            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1659                    currState->longDesc.lookupLevel,
1660                    currState->longDesc.lookupLevel + 1,
1661                    next_desc_addr,
1662                    currState->secureLookup ? "s" : "ns");
1663
1664            // Check for address size fault
1665            if (currState->aarch64 && checkAddrSizeFaultAArch64(
1666                    next_desc_addr, currState->physAddrRange)) {
1667                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1668                        currState->longDesc.lookupLevel);
1669                if (currState->isFetch)
1670                    currState->fault = std::make_shared<PrefetchAbort>(
1671                        currState->vaddr_tainted,
1672                        ArmFault::AddressSizeLL
1673                        + currState->longDesc.lookupLevel,
1674                        isStage2,
1675                        ArmFault::LpaeTran);
1676                else
1677                    currState->fault = std::make_shared<DataAbort>(
1678                        currState->vaddr_tainted,
1679                        TlbEntry::DomainType::NoAccess, currState->isWrite,
1680                        ArmFault::AddressSizeLL
1681                        + currState->longDesc.lookupLevel,
1682                        isStage2,
1683                        ArmFault::LpaeTran);
1684                return;
1685            }
1686
1687            // Trickbox address check
1688            currState->fault = testWalk(
1689                next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1690                toLookupLevel(currState->longDesc.lookupLevel +1));
1691
1692            if (currState->fault) {
1693                if (!currState->timing) {
1694                    currState->tc = NULL;
1695                    currState->req = NULL;
1696                }
1697                return;
1698            }
1699
1700            Request::Flags flag = Request::PT_WALK;
1701            if (currState->secureLookup)
1702                flag.set(Request::SECURE);
1703
1704            LookupLevel L = currState->longDesc.lookupLevel =
1705                (LookupLevel) (currState->longDesc.lookupLevel + 1);
1706            Event *event = NULL;
1707            switch (L) {
1708              case L1:
1709                assert(currState->aarch64);
                M5_FALLTHROUGH;
1710              case L2:
1711              case L3:
1712                event = LongDescEventByLevel[L];
1713                break;
1714              default:
1715                panic("Wrong lookup level in table walk\n");
1716                break;
1717            }
1718
1719            bool delayed;
1720            delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1721                                      sizeof(uint64_t), flag, -1, event,
1722                                      &TableWalker::doLongDescriptor);
1723            if (delayed) {
1724                 currState->delayed = true;
1725            }
1726        }
1727        return;
1728      default:
1729        panic("A new type in a 2 bit field?\n");
1730    }
1731}
1732
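// Decode a short-descriptor L2 entry: invalid entries generate a
// translation fault, the access flag is checked when SCTLR.AFE is set,
// and valid small/large pages are inserted into the TLB.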
1733void
1734TableWalker::doL2Descriptor()
1735{
1736    if (currState->fault != NoFault) {
1737        return;
1738    }
1739
1740    currState->l2Desc.data = htog(currState->l2Desc.data,
1741                                  byteOrder(currState->tc));
1742
1743    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1744            currState->vaddr_tainted, currState->l2Desc.data);
1745    TlbEntry te;
1746
1747    if (currState->l2Desc.invalid()) {
1748        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1749        if (!currState->timing) {
1750            currState->tc = NULL;
1751            currState->req = NULL;
1752        }
1753        if (currState->isFetch)
1754            currState->fault = std::make_shared<PrefetchAbort>(
1755                    currState->vaddr_tainted,
1756                    ArmFault::TranslationLL + L2,
1757                    isStage2,
1758                    ArmFault::VmsaTran);
1759        else
1760            currState->fault = std::make_shared<DataAbort>(
1761                currState->vaddr_tainted, currState->l1Desc.domain(),
1762                currState->isWrite, ArmFault::TranslationLL + L2,
1763                isStage2,
1764                ArmFault::VmsaTran);
1765        return;
1766    }
1767
1768    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1769        /** @todo: check sctlr.ha (bit[17]); if the Hardware Access Flag
1770          * is enabled, set AP[0] in the descriptor (l2Desc.setAp0())
1771          * instead of generating an access flag fault. */
1772        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1773                currState->sctlr.afe, currState->l2Desc.ap());
1774
1775        currState->fault = std::make_shared<DataAbort>(
1776            currState->vaddr_tainted,
1777            TlbEntry::DomainType::NoAccess, currState->isWrite,
1778            ArmFault::AccessFlagLL + L2, isStage2,
1779            ArmFault::VmsaTran);
1780    }
1781
1782    insertTableEntry(currState->l2Desc, false);
1783}
1784
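// Timing-mode completion handler for an L1 descriptor fetch: pop the
// walk from the L1 queue, decode the descriptor, and either report a
// fault, finish the translation, or push the walk onto the L2 queue to
// wait for the L2 fetch it has just started.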
1785void
1786TableWalker::doL1DescriptorWrapper()
1787{
1788    currState = stateQueues[L1].front();
1789    currState->delayed = false;
1790    // if there's a stage2 translation object we don't need it any more
1791    if (currState->stage2Tran) {
1792        delete currState->stage2Tran;
1793        currState->stage2Tran = NULL;
1794    }
1795
1796
1797    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n", &currState->l1Desc.data);
1798    DPRINTF(TLBVerbose, "L1 Desc object      data: %08x\n", currState->l1Desc.data);
1799
1800    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1801    doL1Descriptor();
1802
1803    stateQueues[L1].pop_front();
1804    // Check if fault was generated
1805    if (currState->fault != NoFault) {
1806        currState->transState->finish(currState->fault, currState->req,
1807                                      currState->tc, currState->mode);
1808        statWalksShortTerminatedAtLevel[0]++;
1809
1810        pending = false;
1811        nextWalk(currState->tc);
1812
1813        currState->req = NULL;
1814        currState->tc = NULL;
1815        currState->delayed = false;
1816        delete currState;
1817    }
1818    else if (!currState->delayed) {
1819        // delay is not set so there is no L2 to do
1820        // Don't finish the translation if a stage 2 look up is underway
1821        statWalkServiceTime.sample(curTick() - currState->startTime);
1822        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1823        tlb->translateTiming(currState->req, currState->tc,
1824                             currState->transState, currState->mode);
1825        statWalksShortTerminatedAtLevel[0]++;
1826
1827        pending = false;
1828        nextWalk(currState->tc);
1829
1830        currState->req = NULL;
1831        currState->tc = NULL;
1832        currState->delayed = false;
1833        delete currState;
1834    } else {
1835        // need to do L2 descriptor
1836        stateQueues[L2].push_back(currState);
1837    }
1838    currState = NULL;
1839}
1840
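// Timing-mode completion handler for an L2 descriptor fetch: decode the
// descriptor and finish the translation, reporting a fault if one was
// generated.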
1841void
1842TableWalker::doL2DescriptorWrapper()
1843{
1844    currState = stateQueues[L2].front();
1845    assert(currState->delayed);
1846    // if there's a stage2 translation object we don't need it any more
1847    if (currState->stage2Tran) {
1848        delete currState->stage2Tran;
1849        currState->stage2Tran = NULL;
1850    }
1851
1852    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1853            currState->vaddr_tainted);
1854    doL2Descriptor();
1855
1856    // Check if fault was generated
1857    if (currState->fault != NoFault) {
1858        currState->transState->finish(currState->fault, currState->req,
1859                                      currState->tc, currState->mode);
1860        statWalksShortTerminatedAtLevel[1]++;
1861    } else {
1862        statWalkServiceTime.sample(curTick() - currState->startTime);
1863        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1864        tlb->translateTiming(currState->req, currState->tc,
1865                             currState->transState, currState->mode);
1866        statWalksShortTerminatedAtLevel[1]++;
1867    }
1868
1869
1870    stateQueues[L2].pop_front();
1871    pending = false;
1872    nextWalk(currState->tc);
1873
1874    currState->req = NULL;
1875    currState->tc = NULL;
1876    currState->delayed = false;
1877
1878    delete currState;
1879    currState = NULL;
1880}
1881
1882void
1883TableWalker::doL0LongDescriptorWrapper()
1884{
1885    doLongDescriptorWrapper(L0);
1886}
1887
1888void
1889TableWalker::doL1LongDescriptorWrapper()
1890{
1891    doLongDescriptorWrapper(L1);
1892}
1893
1894void
1895TableWalker::doL2LongDescriptorWrapper()
1896{
1897    doLongDescriptorWrapper(L2);
1898}
1899
1900void
1901TableWalker::doL3LongDescriptorWrapper()
1902{
1903    doLongDescriptorWrapper(L3);
1904}
1905
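// Timing-mode completion handler shared by all long-descriptor lookup
// levels: decode the descriptor for the given level and either report a
// fault, finish the translation, or re-queue the walk for the next
// level.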
1906void
1907TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1908{
1909    currState = stateQueues[curr_lookup_level].front();
1910    assert(curr_lookup_level == currState->longDesc.lookupLevel);
1911    currState->delayed = false;
1912
1913    // if there's a stage2 translation object we don't need it any more
1914    if (currState->stage2Tran) {
1915        delete currState->stage2Tran;
1916        currState->stage2Tran = NULL;
1917    }
1918
1919    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1920            currState->vaddr_tainted);
1921    doLongDescriptor();
1922
1923    stateQueues[curr_lookup_level].pop_front();
1924
1925    if (currState->fault != NoFault) {
1926        // A fault was generated
1927        currState->transState->finish(currState->fault, currState->req,
1928                                      currState->tc, currState->mode);
1929
1930        pending = false;
1931        nextWalk(currState->tc);
1932
1933        currState->req = NULL;
1934        currState->tc = NULL;
1935        currState->delayed = false;
1936        delete currState;
1937    } else if (!currState->delayed) {
1938        // No additional lookups required
1939        DPRINTF(TLBVerbose, "calling translateTiming again\n");
1940        statWalkServiceTime.sample(curTick() - currState->startTime);
1941        tlb->translateTiming(currState->req, currState->tc,
1942                             currState->transState, currState->mode);
1943        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1944
1945        pending = false;
1946        nextWalk(currState->tc);
1947
1948        currState->req = NULL;
1949        currState->tc = NULL;
1950        currState->delayed = false;
1951        delete currState;
1952    } else {
1953        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1954            panic("Max. number of lookups already reached in table walk\n");
1955        // Need to perform additional lookups
1956        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1957    }
1958    currState = NULL;
1959}
1960
1961
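// Start the next queued walk, if any, on the following clock edge;
// otherwise signal that a pending drain can complete.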
1962void
1963TableWalker::nextWalk(ThreadContext *tc)
1964{
1965    if (pendingQueue.size())
1966        schedule(doProcessEvent, clockEdge(Cycles(1)));
1967    else
1968        completeDrain();
1969}
1970
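// Issue the memory read for a descriptor. When the walk has a stage 2
// component the descriptor address is an IPA and the read goes through
// the stage 2 MMU; otherwise it goes straight out of the walker's port.
// Timing accesses park the walk state on the queue selected by
// queueIndex and resume via 'event'; atomic and functional accesses
// invoke the continuation immediately. Returns true for timing
// accesses, i.e. when the result will only be available later.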
1971bool
1972TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1973    Request::Flags flags, int queueIndex, Event *event,
1974    void (TableWalker::*doDescriptor)())
1975{
1976    bool isTiming = currState->timing;
1977
1978    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
1979            descAddr, currState->stage2Req);
1980
1981    // If this translation has a stage 2 then we know descAddr is an IPA and
1982    // needs to be translated before we can access the page table. Do that
1983    // check here.
1984    if (currState->stage2Req) {
1985        Fault fault;
1986        flags = flags | TLB::MustBeOne;
1987
1988        if (isTiming) {
1989            Stage2MMU::Stage2Translation *tran = new
1990                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1991                                             currState->vaddr);
1992            currState->stage2Tran = tran;
1993            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1994                                     flags);
1995            fault = tran->fault;
1996        } else {
1997            fault = stage2Mmu->readDataUntimed(currState->tc,
1998                currState->vaddr, descAddr, data, numBytes, flags,
1999                currState->functional);
2000        }
2001
2002        if (fault != NoFault) {
2003            currState->fault = fault;
2004        }
2005        if (isTiming) {
2006            if (queueIndex >= 0) {
2007                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2008                        stateQueues[queueIndex].size());
2009                stateQueues[queueIndex].push_back(currState);
2010                currState = NULL;
2011            }
2012        } else {
2013            (this->*doDescriptor)();
2014        }
2015    } else {
2016        if (isTiming) {
2017            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2018                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2019            if (queueIndex >= 0) {
2020                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2021                        stateQueues[queueIndex].size());
2022                stateQueues[queueIndex].push_back(currState);
2023                currState = NULL;
2024            }
2025        } else if (!currState->functional) {
2026            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2027                           currState->tc->getCpuPtr()->clockPeriod(), flags);
2028            (this->*doDescriptor)();
2029        } else {
2030            RequestPtr req = std::make_shared<Request>(
2031                descAddr, numBytes, flags, masterId);
2032
2033            req->taskId(ContextSwitchTaskId::DMA);
2034            PacketPtr  pkt = new Packet(req, MemCmd::ReadReq);
2035            pkt->dataStatic(data);
2036            port->sendFunctional(pkt);
2037            (this->*doDescriptor)();
2038            delete pkt;
2039        }
2040    }
2041    return (isTiming);
2042}
2043
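// Build a TlbEntry from a completed descriptor (short or long format),
// fill in the walk-wide state (ASID, VMID, security state, exception
// level, hierarchical permissions and memory attributes) and insert it
// into the TLB.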
2044void
2045TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2046{
2047    TlbEntry te;
2048
2049    // Create and fill a new page table entry
2050    te.valid          = true;
2051    te.longDescFormat = longDescriptor;
2052    te.isHyp          = currState->isHyp;
2053    te.asid           = currState->asid;
2054    te.vmid           = currState->vmid;
2055    te.N              = descriptor.offsetBits();
2056    te.vpn            = currState->vaddr >> te.N;
2057    te.size           = (1<<te.N) - 1;
2058    te.pfn            = descriptor.pfn();
2059    te.domain         = descriptor.domain();
2060    te.lookupLevel    = descriptor.lookupLevel;
2061    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
2062    te.nstid          = !currState->isSecure;
2063    te.xn             = descriptor.xn();
2064    if (currState->aarch64)
2065        te.el         = currState->el;
2066    else
2067        te.el         = EL1;
2068
2069    statPageSizes[pageSizeNtoStatBin(te.N)]++;
2070    statRequestOrigin[COMPLETED][currState->isFetch]++;
2071
2072    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2073    // as global
2074    te.global         = descriptor.global(currState) || isStage2;
2075    if (longDescriptor) {
2076        LongDescriptor lDescriptor =
2077            dynamic_cast<LongDescriptor &>(descriptor);
2078
2079        te.xn |= currState->xnTable;
2080        te.pxn = currState->pxnTable || lDescriptor.pxn();
2081        if (isStage2) {
2082            // This is actually the HAP field, but it is stored in the same
2083            // bit positions as the AP field in a stage 1 translation.
2084            te.hap = lDescriptor.ap();
2085        } else {
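            // Stage 1: merge the descriptor's AP[2:1] with the
            // hierarchical APTable restrictions gathered during the
            // walk; !rwTable forces the entry read-only and !userTable
            // removes EL0 access.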
2086            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2087                (currState->userTable && (descriptor.ap() & 0x1));
2088        }
2089        if (currState->aarch64)
2090            memAttrsAArch64(currState->tc, te, lDescriptor);
2091        else
2092            memAttrsLPAE(currState->tc, te, lDescriptor);
2093    } else {
2094        te.ap = descriptor.ap();
2095        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2096                 descriptor.shareable());
2097    }
2098
2099    // Debug output
2100    DPRINTF(TLB, descriptor.dbgHeader().c_str());
2101    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2102            te.N, te.pfn, te.size, te.global, te.valid);
2103    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2104            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2105            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2106            te.nonCacheable, te.ns);
2107    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2108            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2109            descriptor.getRawData());
2110
2111    // Insert the entry into the TLB
2112    tlb->insert(currState->vaddr, te);
2113    if (!currState->timing) {
2114        currState->tc  = NULL;
2115        currState->req = NULL;
2116    }
2117}
2118
2119ArmISA::TableWalker *
2120ArmTableWalkerParams::create()
2121{
2122    return new ArmISA::TableWalker(this);
2123}
2124
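// Convert a raw integer to a LookupLevel; only L1-L3 are valid here,
// anything else panics.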
2125LookupLevel
2126TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2127{
2128    switch (lookup_level_as_int) {
2129      case L1:
2130        return L1;
2131      case L2:
2132        return L2;
2133      case L3:
2134        return L3;
2135      default:
2136        panic("Invalid lookup level conversion");
2137    }
2138}
2139
2140/* this method keeps track of the table walker queue's residency, so
2141 * needs to be called whenever requests start and complete. */
2142void
2143TableWalker::pendingChange()
2144{
2145    unsigned n = pendingQueue.size();
2146    if ((currState != NULL) && (currState != pendingQueue.front())) {
2147        ++n;
2148    }
2149
2150    if (n != pendingReqs) {
2151        Tick now = curTick();
2152        statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2153        pendingReqs = n;
2154        pendingChangeTick = now;
2155    }
2156}
2157
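// Trickbox check for a descriptor access performed as part of the
// current walk.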
2158Fault
2159TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2160                      LookupLevel lookup_level)
2161{
2162    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2163                         currState->mode, domain, lookup_level);
2164}
2165
2166
2167uint8_t
2168TableWalker::pageSizeNtoStatBin(uint8_t N)
2169{
2170    /* for statPageSizes */
2171    switch(N) {
2172        case 12: return 0; // 4K
2173        case 14: return 1; // 16K (using 16K granule in v8-64)
2174        case 16: return 2; // 64K
2175        case 20: return 3; // 1M
2176        case 21: return 4; // 2M-LPAE
2177        case 24: return 5; // 16M
2178        case 25: return 6; // 32M (using 16K granule in v8-64)
2179        case 29: return 7; // 512M (using 64K granule in v8-64)
2180        case 30: return 8; // 1G-LPAE
2181        default:
2182            panic("unknown page size");
2183            return 255;
2184    }
2185}
2186
2187void
2188TableWalker::regStats()
2189{
2190    ClockedObject::regStats();
2191
2192    statWalks
2193        .name(name() + ".walks")
2194        .desc("Table walker walks requested")
2195        ;
2196
2197    statWalksShortDescriptor
2198        .name(name() + ".walksShort")
2199        .desc("Table walker walks initiated with short descriptors")
2200        .flags(Stats::nozero)
2201        ;
2202
2203    statWalksLongDescriptor
2204        .name(name() + ".walksLong")
2205        .desc("Table walker walks initiated with long descriptors")
2206        .flags(Stats::nozero)
2207        ;
2208
2209    statWalksShortTerminatedAtLevel
2210        .init(2)
2211        .name(name() + ".walksShortTerminationLevel")
2212        .desc("Level at which table walker walks "
2213              "with short descriptors terminate")
2214        .flags(Stats::nozero)
2215        ;
2216    statWalksShortTerminatedAtLevel.subname(0, "Level1");
2217    statWalksShortTerminatedAtLevel.subname(1, "Level2");
2218
2219    statWalksLongTerminatedAtLevel
2220        .init(4)
2221        .name(name() + ".walksLongTerminationLevel")
2222        .desc("Level at which table walker walks "
2223              "with long descriptors terminate")
2224        .flags(Stats::nozero)
2225        ;
2226    statWalksLongTerminatedAtLevel.subname(0, "Level0");
2227    statWalksLongTerminatedAtLevel.subname(1, "Level1");
2228    statWalksLongTerminatedAtLevel.subname(2, "Level2");
2229    statWalksLongTerminatedAtLevel.subname(3, "Level3");
2230
2231    statSquashedBefore
2232        .name(name() + ".walksSquashedBefore")
2233        .desc("Table walks squashed before starting")
2234        .flags(Stats::nozero)
2235        ;
2236
2237    statSquashedAfter
2238        .name(name() + ".walksSquashedAfter")
2239        .desc("Table walks squashed after completion")
2240        .flags(Stats::nozero)
2241        ;
2242
2243    statWalkWaitTime
2244        .init(16)
2245        .name(name() + ".walkWaitTime")
2246        .desc("Table walker wait (enqueue to first request) latency")
2247        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2248        ;
2249
2250    statWalkServiceTime
2251        .init(16)
2252        .name(name() + ".walkCompletionTime")
2253        .desc("Table walker service (enqueue to completion) latency")
2254        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2255        ;
2256
2257    statPendingWalks
2258        .init(16)
2259        .name(name() + ".walksPending")
2260        .desc("Table walker pending requests distribution")
2261        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
2262        ;
2263
2264    statPageSizes // see DDI 0487A D4-1661
2265        .init(9)
2266        .name(name() + ".walkPageSizes")
2267        .desc("Table walker page sizes translated")
2268        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
2269        ;
2270    statPageSizes.subname(0, "4K");
2271    statPageSizes.subname(1, "16K");
2272    statPageSizes.subname(2, "64K");
2273    statPageSizes.subname(3, "1M");
2274    statPageSizes.subname(4, "2M");
2275    statPageSizes.subname(5, "16M");
2276    statPageSizes.subname(6, "32M");
2277    statPageSizes.subname(7, "512M");
2278    statPageSizes.subname(8, "1G");
2279
2280    statRequestOrigin
2281        .init(2,2) // Instruction/Data, requests/completed
2282        .name(name() + ".walkRequestOrigin")
2283        .desc("Table walker requests started/completed, data/inst")
2284        .flags(Stats::total)
2285        ;
2286    statRequestOrigin.subname(0,"Requested");
2287    statRequestOrigin.subname(1,"Completed");
2288    statRequestOrigin.ysubname(0,"Data");
2289    statRequestOrigin.ysubname(1,"Inst");
2290}
2291