smmu_v3_transl.cc revision 14102:b0b52ccb7e1b
/*
 * Copyright (c) 2013, 2018-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stan Czerniawski
 */

#include "dev/arm/smmu_v3_transl.hh"

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"

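// Construct a translation request from an incoming packet; the stream ID,
// substream ID (if present), size and read/write direction are taken from
// the packet, and 'ats' marks ATS translation requests.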
SMMUTranslRequest
SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
{
    SMMUTranslRequest req;
    req.addr         = pkt->getAddr();
    req.size         = pkt->getSize();
    req.sid          = pkt->req->streamId();
    req.ssid         = pkt->req->hasSubstreamId() ?
        pkt->req->substreamId() : 0;
    req.isWrite      = pkt->isWrite();
    req.isPrefetch   = false;
    req.isAtsRequest = ats;
    req.pkt          = pkt;

    return req;
}

SMMUTranslRequest
SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
{
    SMMUTranslRequest req;
    req.addr         = addr;
    req.size         = 0;
    req.sid          = sid;
    req.ssid         = ssid;
    req.isWrite      = false;
    req.isPrefetch   = true;
    req.isAtsRequest = false;
    req.pkt          = NULL;

    return req;
}

SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
    SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
  :
    SMMUProcess(name, _smmu),
    ifc(_ifc)
{
    // Decrease number of pending translation slots on the slave interface
    assert(ifc.xlateSlotsRemaining > 0);
    ifc.xlateSlotsRemaining--;
    reinit();
}

SMMUTranslationProcess::~SMMUTranslationProcess()
{
    // Increase number of pending translation slots on the slave interface
    ifc.xlateSlotsRemaining++;
    // If no more SMMU translations are pending (all slots available),
    // signal SMMU Slave Interface as drained
    if (ifc.xlateSlotsRemaining == ifc.params()->xlate_slots) {
        ifc.signalDrainDone();
    }
}

void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    request = req;

    reinit();
}

void
SMMUTranslationProcess::resumeTransaction()
{
    assert(smmu.system.isTimingMode());

    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}

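// Main translation coroutine: charge the slave-port beats for the incoming
// transaction, bypass if the SMMU is disabled, and otherwise service the
// request from the micro TLB, the slave interface TLB or a full SMMU
// translation, honouring the 4k-page and order-ID hazards along the way.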
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
                request.addr, request.size);


    unsigned numSlaveBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.slavePortSem);
    doDelay(yield, Cycles(numSlaveBeats));
    doSemaphoreUp(ifc.slavePortSem);


    recvTick = curTick();

    if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        //   - there's already a transaction looking up the same 4k page, OR
        //   - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            //   - there was a TLB hit and the entry was prefetched, OR
            //   - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        if (tr.fault != FAULT_NONE)
            panic("fault\n");

        completeTransaction(yield, tr);
    }
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::bypass(Addr addr) const
{
    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = addr;
    tr.addrMask = 0;
    tr.writable = 1;

    return tr;
}

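// Full translation through the SMMU: look up (or fetch) the translation
// context, consult the main TLB and, on a miss, perform the page table
// walk(s) dictated by the stage 1 / stage 2 configuration.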
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if (findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->SLAVE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}

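// The TLB lookup helpers below share the same pattern: take the structure's
// semaphore, model the access latency, perform the lookup and fill in the
// translation result on a hit.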
bool
SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!ifc.microTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.microTLBSem);
    doDelay(yield, ifc.microTLBLat);
    const SMMUTLB::Entry *e =
        ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.microTLBSem);

    if (!e) {
        DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
            request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
        "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
        request.addr, e->vaMask, request.sid, request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

bool
SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
                                     bool &wasPrefetched)
{
    if (!ifc.mainTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.mainTLBSem);
    doDelay(yield, ifc.mainTLBLat);
    const SMMUTLB::Entry *e =
        ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.mainTLBSem);

    if (!e) {
        DPRINTF(SMMUv3,
                "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
                request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
            "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
            "paddr=%#x\n", request.addr, e->vaMask, request.sid,
            request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;
    wasPrefetched = e->prefetched;

    return true;
}

bool
SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!smmu.tlbEnable)
        return false;

    doSemaphoreDown(yield, smmu.tlbSem);
    doDelay(yield, smmu.tlbLat);
    const ARMArchTLB::Entry *e =
        smmu.tlb.lookup(request.addr, context.asid, context.vmid);
    doSemaphoreUp(smmu.tlbSem);

    if (!e) {
        DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
            request.addr, context.asid, context.vmid);

        return false;
    }

    DPRINTF(SMMUv3,
            "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
            request.addr, e->vaMask, context.asid, context.vmid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

void
SMMUTranslationProcess::microTLBUpdate(Yield &yield,
                                       const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.microTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = false;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    doSemaphoreDown(yield, ifc.microTLBSem);

    DPRINTF(SMMUv3,
        "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
        e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);

    doSemaphoreUp(ifc.microTLBSem);
}

void
SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
                                     const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.mainTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = request.isPrefetch;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
    if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
        alloc = request.isPrefetch ?
            SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;

    doSemaphoreDown(yield, ifc.mainTLBSem);

    DPRINTF(SMMUv3,
            "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
            "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.mainTLB->store(e, alloc);

    doSemaphoreUp(ifc.mainTLBSem);
}

void
SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
                                      const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!smmu.tlbEnable)
        return;

    ARMArchTLB::Entry e;
    e.valid = true;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.asid = context.asid;
    e.vmid = context.vmid;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;

    doSemaphoreDown(yield, smmu.tlbSem);

    DPRINTF(SMMUv3,
            "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
            e.va, e.vaMask, e.pa, e.asid, e.vmid);

    smmu.tlb.store(e);

    doSemaphoreUp(smmu.tlbSem);
}

bool
SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return false;

    doSemaphoreDown(yield, smmu.configSem);
    doDelay(yield, smmu.configLat);
    const ConfigCache::Entry *e =
        smmu.configCache.lookup(request.sid, request.ssid);
    doSemaphoreUp(smmu.configSem);

    if (!e) {
        DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
                request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
            request.sid, request.ssid, e->ttb0, e->asid);

    tc.stage1Enable = e->stage1_en;
    tc.stage2Enable = e->stage2_en;

    tc.ttb0 = e->ttb0;
    tc.ttb1 = e->ttb1;
    tc.asid = e->asid;
    tc.httb = e->httb;
    tc.vmid = e->vmid;

    tc.stage1TranslGranule = e->stage1_tg;
    tc.stage2TranslGranule = e->stage2_tg;

    tc.t0sz = e->t0sz;
    tc.s2t0sz = e->s2t0sz;

    return true;
}

void
SMMUTranslationProcess::configCacheUpdate(Yield &yield,
                                          const TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return;

    ConfigCache::Entry e;
    e.valid = true;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.stage1_en = tc.stage1Enable;
    e.stage2_en = tc.stage2Enable;
    e.ttb0 = tc.ttb0;
    e.ttb1 = tc.ttb1;
    e.asid = tc.asid;
    e.httb = tc.httb;
    e.vmid = tc.vmid;
    e.stage1_tg = tc.stage1TranslGranule;
    e.stage2_tg = tc.stage2TranslGranule;
    e.t0sz = tc.t0sz;
    e.s2t0sz = tc.s2t0sz;

    doSemaphoreDown(yield, smmu.configSem);

    DPRINTF(SMMUv3, "Config upd  sid=%#x ssid=%#x\n", e.sid, e.ssid);

    smmu.configCache.store(e);

    doSemaphoreUp(smmu.configSem);
}

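// Resolve the translation context for this stream: read the STE to
// determine the enabled stages and, if stage 1 is enabled, read the
// (substream-indexed) Context Descriptor as well.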
bool
SMMUTranslationProcess::findConfig(Yield &yield,
                                   TranslContext &tc,
                                   TranslResult &tr)
{
    tc.stage1Enable = false;
    tc.stage2Enable = false;

    StreamTableEntry ste;
    doReadSTE(yield, ste, request.sid);

    switch (ste.dw0.config) {
        case STE_CONFIG_BYPASS:
            break;

        case STE_CONFIG_STAGE1_ONLY:
            tc.stage1Enable = true;
            break;

        case STE_CONFIG_STAGE2_ONLY:
            tc.stage2Enable = true;
            break;

        case STE_CONFIG_STAGE1_AND_2:
            tc.stage1Enable = true;
            tc.stage2Enable = true;
            break;

        default:
            panic("Bad or unimplemented STE config %d\n",
                ste.dw0.config);
    }


    // Establish stage 2 context first since
    // Context Descriptors can be in IPA space.
    if (tc.stage2Enable) {
        tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
        tc.vmid = ste.dw2.s2vmid;
        tc.stage2TranslGranule = ste.dw2.s2tg;
        tc.s2t0sz = ste.dw2.s2t0sz;
    } else {
        tc.httb = 0xdeadbeef;
        tc.vmid = 0;
        tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
        tc.s2t0sz = 0;
    }


    // Now fetch stage 1 config.
    if (context.stage1Enable) {
        ContextDescriptor cd;
        doReadCD(yield, cd, ste, request.sid, request.ssid);

        tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
        tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
        tc.asid = cd.dw0.asid;
        tc.stage1TranslGranule = cd.dw0.tg0;
        tc.t0sz = cd.dw0.t0sz;
    } else {
        tc.ttb0 = 0xcafebabe;
        tc.ttb1 = 0xcafed00d;
        tc.asid = 0;
        tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
        tc.t0sz = 0;
    }

    return true;
}

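// Look up an intermediate walk result, keyed by VA (masked per level),
// ASID/VMID, stage and level, if the walk cache is enabled for that level.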
void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    const char *indent = stage==2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit  va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}

void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        e.asid = stage==1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd  va=%#x mask=%#x asid=%#x vmid=%#x "
                        "tpa=%#x leaf=%s (S%d, L%d)\n",
                e.stage==2 ? "  " : "",
                e.va, e.vaMask, e.asid, e.vmid,
                e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}

/*
 * Please note:
 * This does not deal with the case where stage 1 page size
 * is larger than stage 2 page size.
 */
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf  = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault    = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr     = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
                                   const PageTableOps *pt_ops,
                                   unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "  Fetching S2 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 2, level);

        DPRINTF(SMMUv3, "  Got S2 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf  = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "  S2 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, true))
        {
            DPRINTF(SMMUv3, "  S2 PTE not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (final_tr || smmu.walkCacheNonfinalEnable)
            walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                            2, level, leaf,
                            leaf ? pt_ops->isWritable(pte, level, true) : 0);
        if (leaf)
            break;
    }

    TranslResult tr;
    tr.fault    = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr     = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, true);

    return tr;
}

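// Stage 1 (optionally nested with stage 2) translation: try the walk cache
// from the deepest level upwards and fall back to a full walk from TTB0,
// translating table addresses through stage 2 when it is enabled.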
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
        level > pt_ops->firstLevel(context.t0sz);
        level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault    = FAULT_NONE;
            tr.addr     = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops,
                            pt_ops->firstLevel(context.t0sz),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}

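// Stage 2 translation of an IPA: check the IPA cache first, then the walk
// cache, then walk from the HTTB; successful results are installed in the
// IPA cache.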
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
{
    const PageTableOps *pt_ops =
            smmu.getPageTableOps(context.stage2TranslGranule);

    const IPACache::Entry *ipa_ep = NULL;
    if (smmu.ipaCacheEnable) {
        doSemaphoreDown(yield, smmu.ipaSem);
        doDelay(yield, smmu.ipaLat);
        ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
        doSemaphoreUp(smmu.ipaSem);
    }

    if (ipa_ep) {
        TranslResult tr;
        tr.fault    = FAULT_NONE;
        tr.addr     = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
        tr.addrMask = ipa_ep->ipaMask;
        tr.writable = ipa_ep->permissions;

        DPRINTF(SMMUv3, "  IPACache hit  ipa=%#x vmid=%#x pa=%#x\n",
            addr, context.vmid, tr.addr);

        return tr;
    } else if (smmu.ipaCacheEnable) {
        DPRINTF(SMMUv3, "  IPACache miss ipa=%#x vmid=%#x\n",
                addr, context.vmid);
    }

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level = pt_ops->firstLevel(context.s2t0sz);

    if (final_tr || smmu.walkCacheNonfinalEnable) {
        // Level here is actually (level+1) so we can count down
        // to 0 using unsigned int.
        for (level = pt_ops->lastLevel() + 1;
            level > pt_ops->firstLevel(context.s2t0sz);
            level--)
        {
            walkCacheLookup(yield, walk_ep, addr,
                            0, context.vmid, 2, level-1);

            if (walk_ep)
                break;
        }

        // Correct level (see above).
        level -= 1;
    }

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault    = FAULT_NONE;
            tr.addr     = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage2(yield, addr, final_tr, pt_ops,
                            level + 1, walk_ep->pa);
        }
    } else {
        tr = walkStage2(yield, addr, final_tr, pt_ops,
                        pt_ops->firstLevel(context.s2t0sz),
                        context.httb);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "  Translated %saddr %#x to paddr %#x\n",
            context.stage1Enable ? "ip" : "v", addr, tr.addr);

    if (smmu.ipaCacheEnable) {
        IPACache::Entry e;
        e.valid = true;
        e.ipaMask = tr.addrMask;
        e.ipa = addr & e.ipaMask;
        e.pa = tr.addr & tr.addrMask;
        e.permissions = tr.writable;
        e.vmid = context.vmid;

        doSemaphoreDown(yield, smmu.ipaSem);
        smmu.ipaCache.store(e);
        doSemaphoreUp(smmu.ipaSem);
    }

    return tr;
}

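// Combine nested stage 1 and stage 2 results: the physical address comes
// from stage 2, the effective page mask is the coarser of the two and the
// mapping is writable only if both stages permit writes.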
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
                                            const TranslResult &s2tr) const
{
    if (s2tr.fault != FAULT_NONE)
        return s2tr;

    assert(s1tr.fault == FAULT_NONE);

    TranslResult tr;
    tr.fault    = FAULT_NONE;
    tr.addr     = s2tr.addr;
    tr.addrMask = s1tr.addrMask | s2tr.addrMask;
    tr.writable = s1tr.writable & s2tr.writable;

    return tr;
}

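// 4k-page hazard tracking: a prefetch to a page that is already being
// looked up is dropped, and a demand miss waits for earlier requests to the
// same 4k page before re-checking the slave interface TLB.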
bool
SMMUTranslationProcess::hazard4kCheck()
{
    Addr addr4k = request.addr & ~0xfffULL;

    for (auto it = ifc.duplicateReqs.begin();
         it != ifc.duplicateReqs.end();
         ++it)
    {
        Addr other4k = (*it)->request.addr & ~0xfffULL;
        if (addr4k == other4k)
            return true;
    }

    return false;
}

void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg:  p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}

void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazard4kRelease()
{
    DPRINTF(SMMUv3Hazard, "4kRel:  p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
        if (*it == this)
            break;

    if (it == ifc.duplicateReqs.end())
        panic("hazard4kRelease: request not found");

    ifc.duplicateReqs.erase(it);

    doBroadcastSignal(ifc.duplicateReqRemoved);
}

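// Order-ID hazard tracking: transactions with the same AMBA order ID must
// complete in order, so a translation waits until it is the oldest pending
// request with its order ID before responding.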
void
SMMUTranslationProcess::hazardIdRegister()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdReg:  p=%p oid=%d\n", this, orderId);

    assert(orderId < SMMU_MAX_TRANS_ID);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    depReqs.push_back(this);
}

void
SMMUTranslationProcess::hazardIdHold(Yield &yield)
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
            DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
                    this, orderId, *it);

            if (AMBA::orderId((*it)->request.pkt) == orderId) {
                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
                        this, orderId, *it);

                doWaitForSignal(yield, ifc.dependentReqRemoved);

                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
                        this, orderId);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazardIdRelease()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdRel:  p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
        if (*it == this)
            break;
    }

    if (it == depReqs.end())
        panic("hazardIdRelease: request not found");

    depReqs.erase(it);

    doBroadcastSignal(ifc.dependentReqRemoved);
}

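// Speculatively translate the next 4k page by spawning a prefetch
// translation process (timing mode only, and only if a slot is free).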
void
SMMUTranslationProcess::issuePrefetch(Addr addr)
{
    if (!smmu.system.isTimingMode())
        return;

    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
        return;

    std::string proc_name = csprintf("%sprf", name());
    SMMUTranslationProcess *proc =
        new SMMUTranslationProcess(proc_name, smmu, ifc);

    proc->beginTransaction(
            SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
}

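// Complete a successful translation: charge the master-port beats, return
// any write-buffer slots, then either answer the ATS request or forward the
// translated transaction downstream and relay the response upstream.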
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}

void
SMMUTranslationProcess::completePrefetch(Yield &yield)
{
    SMMUAction a;
    a.type = ACTION_TERMINATE;
    a.pkt = NULL;
    a.ifc = &ifc;
    a.delay = 0;
    yield(a);
}

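// Write an event record to the event queue and raise the associated MSI.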
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
        "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
        event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
        ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}

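// Fetch the Stream Table Entry for a stream ID, following either the linear
// or the two-level stream table format selected in STRTAB_BASE_CFG.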
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        if (split != 7 && split != 8 && split != 16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE at %#x: 0x%016x\n", l2_addr, l2_ptr);

        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}

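// Fetch the Context Descriptor selected by the substream ID; the CD table
// can be single- or two-level, and its addresses are translated through
// stage 2 when that stage is enabled.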
void
SMMUTranslationProcess::doReadCD(Yield &yield,
                                 ContextDescriptor &cd,
                                 const StreamTableEntry &ste,
                                 uint32_t sid, uint32_t ssid)
{
    Addr cd_addr;

    if (ste.dw0.s1cdmax == 0) {
        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
    } else {
        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
        if (ssid >= max_ssid)
            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);

        if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
            ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
        {
            unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;

            uint64_t l2_ptr;
            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                bits(ssid, 24, split) * sizeof(l2_ptr);

            if (context.stage2Enable)
                l2_addr = translateStage2(yield, l2_addr, false).addr;

            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);

            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);

            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);

            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);

            smmu.cdL1Fetches++;
        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
        }
    }

    if (context.stage2Enable)
        cd_addr = translateStage2(yield, cd_addr, false).addr;

    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);

    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);

    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);


    if (!cd.dw0.valid)
        panic("CD @ %#x not valid\n", cd_addr);

    smmu.cdFetches++;
}

void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}

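// Read a single PTE for the given stage and level, aligning the access down
// to the PTE's natural size.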
void
SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
                                  void *ptr, unsigned stage,
                                  unsigned level)
{
    size_t pte_size = sizeof(PageTableOps::pte_t);

    Addr mask = pte_size - 1;
    Addr base = addr & ~mask;

    doRead(yield, base, ptr, pte_size);
}
1471