smmu_v3_transl.cc revision 14039
1/*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Stan Czerniawski
38 */
39
40#include "dev/arm/smmu_v3_transl.hh"
41
42#include "debug/SMMUv3.hh"
43#include "debug/SMMUv3Hazard.hh"
44#include "dev/arm/amba.hh"
45#include "dev/arm/smmu_v3.hh"
46#include "sim/system.hh"
47
48SMMUTranslRequest
49SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
50{
51    SMMUTranslRequest req;
52    req.addr         = pkt->getAddr();
53    req.size         = pkt->getSize();
54    req.sid          = pkt->req->streamId();
55    req.ssid         = pkt->req->hasSubstreamId() ?
56        pkt->req->substreamId() : 0;
57    req.isWrite      = pkt->isWrite();
58    req.isPrefetch   = false;
59    req.isAtsRequest = ats;
60    req.pkt          = pkt;
61
62    return req;
63}
64
65SMMUTranslRequest
66SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
67{
68    SMMUTranslRequest req;
69    req.addr         = addr;
70    req.size         = 0;
71    req.sid          = sid;
72    req.ssid         = ssid;
73    req.isWrite      = false;
74    req.isPrefetch   = true;
75    req.isAtsRequest = false;
76    req.pkt          = NULL;
77
78    return req;
79}
80
void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    // Latch the request, then reset the coroutine state so the process
    // starts translating this request from scratch.
    request = req;

    reinit();
}
88
void
SMMUTranslationProcess::resumeTransaction()
{
    // Restart a previously stalled (faulted) transaction. Only
    // meaningful in timing mode.
    assert(smmu.system.isTimingMode());

    // NOTE(review): the stall/resume path is currently non-functional
    // by design - this assert fires if it is ever exercised.
    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    // Silence unused-variable warnings when DPRINTF compiles out.
    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    // Re-run the whole translation from the beginning.
    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}
106
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Top-level coroutine body for one translation request: model the
    // slave-port transfer, consult the TLB hierarchy, run the SMMU
    // translation on a miss, and complete the transaction.
    //
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    // Start of the 4k page following the request address.
    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    // Transactions must be contained within a single 4k page.
    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
                request.addr, request.size);


    // Beats needed to move the data over the slave port; reads occupy
    // the port for a single beat.
    unsigned numSlaveBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.slavePortSem);
    doDelay(yield, Cycles(numSlaveBeats));
    doSemaphoreUp(ifc.slavePortSem);


    recvTick = curTick();


    // Bit 0 of CR0 is the global enable; when clear the SMMU bypasses
    // translation entirely.
    if (!(smmu.regs.cr0 & 0x1)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        //   - there's already a transaction looking up the same 4k page, OR
        //   - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                // Wait for any in-flight request to the same 4k page
                // to finish, then retry - it may have filled the TLB.
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            //   - there was a TLB hit and the entry was prefetched, OR
            //   - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        // Fault reporting/stalling is not implemented yet.
        if (tr.fault != FAULT_NONE)
            panic("fault\n");

        completeTransaction(yield, tr);
    }
}
208
209SMMUTranslationProcess::TranslResult
210SMMUTranslationProcess::bypass(Addr addr) const
211{
212    TranslResult tr;
213    tr.fault = FAULT_NONE;
214    tr.addr = addr;
215    tr.addrMask = 0;
216    tr.writable = 1;
217
218    return tr;
219}
220
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    // Full SMMU-side translation: fetch the stream configuration
    // (config cache, falling back to an STE/CD fetch), probe the SMMU
    // main TLB and, on a miss, perform the page table walk. Pipeline
    // links and credits are modelled with semaphore/delay pairs.
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    // Obtain the translation context, caching it on a config miss.
    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if(findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            // Stage 1 walk (with nested stage 2, if enabled).
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            // Stage 2 only: translate the request address as the final
            // translation.
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->SLAVE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}
282
283bool
284SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
285{
286    if (!ifc.microTLBEnable)
287        return false;
288
289    doSemaphoreDown(yield, ifc.microTLBSem);
290    doDelay(yield, ifc.microTLBLat);
291    const SMMUTLB::Entry *e =
292        ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
293    doSemaphoreUp(ifc.microTLBSem);
294
295    if (!e) {
296        DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
297            request.addr, request.sid, request.ssid);
298
299        return false;
300    }
301
302    DPRINTF(SMMUv3,
303        "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
304        request.addr, e->vaMask, request.sid, request.ssid, e->pa);
305
306    tr.fault = FAULT_NONE;
307    tr.addr = e->pa + (request.addr & ~e->vaMask);;
308    tr.addrMask = e->vaMask;
309    tr.writable = e->permissions;
310
311    return true;
312}
313
314bool
315SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
316                                     bool &wasPrefetched)
317{
318    if (!ifc.mainTLBEnable)
319        return false;
320
321    doSemaphoreDown(yield, ifc.mainTLBSem);
322    doDelay(yield, ifc.mainTLBLat);
323    const SMMUTLB::Entry *e =
324        ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
325    doSemaphoreUp(ifc.mainTLBSem);
326
327    if (!e) {
328        DPRINTF(SMMUv3,
329                "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
330                request.addr, request.sid, request.ssid);
331
332        return false;
333    }
334
335    DPRINTF(SMMUv3,
336            "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
337            "paddr=%#x\n", request.addr, e->vaMask, request.sid,
338            request.ssid, e->pa);
339
340    tr.fault = FAULT_NONE;
341    tr.addr = e->pa + (request.addr & ~e->vaMask);;
342    tr.addrMask = e->vaMask;
343    tr.writable = e->permissions;
344    wasPrefetched = e->prefetched;
345
346    return true;
347}
348
349bool
350SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
351{
352    if (!smmu.tlbEnable)
353        return false;
354
355    doSemaphoreDown(yield, smmu.tlbSem);
356    doDelay(yield, smmu.tlbLat);
357    const ARMArchTLB::Entry *e =
358        smmu.tlb.lookup(request.addr, context.asid, context.vmid);
359    doSemaphoreUp(smmu.tlbSem);
360
361    if (!e) {
362        DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
363            request.addr, context.asid, context.vmid);
364
365        return false;
366    }
367
368    DPRINTF(SMMUv3,
369            "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
370            request.addr, e->vaMask, context.asid, context.vmid, e->pa);
371
372    tr.fault = FAULT_NONE;
373    tr.addr = e->pa + (request.addr & ~e->vaMask);;
374    tr.addrMask = e->vaMask;
375    tr.writable = e->permissions;
376
377    return true;
378}
379
380void
381SMMUTranslationProcess::microTLBUpdate(Yield &yield,
382                                       const TranslResult &tr)
383{
384    assert(tr.fault == FAULT_NONE);
385
386    if (!ifc.microTLBEnable)
387        return;
388
389    SMMUTLB::Entry e;
390    e.valid = true;
391    e.prefetched = false;
392    e.sid = request.sid;
393    e.ssid = request.ssid;
394    e.vaMask = tr.addrMask;
395    e.va = request.addr & e.vaMask;
396    e.pa = tr.addr & e.vaMask;
397    e.permissions = tr.writable;
398    e.asid = context.asid;
399    e.vmid = context.vmid;
400
401    doSemaphoreDown(yield, ifc.microTLBSem);
402
403    DPRINTF(SMMUv3,
404        "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
405        e.va, e.vaMask, e.pa, e.sid, e.ssid);
406
407    ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);
408
409    doSemaphoreUp(ifc.microTLBSem);
410}
411
412void
413SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
414                                     const TranslResult &tr)
415{
416    assert(tr.fault == FAULT_NONE);
417
418    if (!ifc.mainTLBEnable)
419        return;
420
421    SMMUTLB::Entry e;
422    e.valid = true;
423    e.prefetched = request.isPrefetch;
424    e.sid = request.sid;
425    e.ssid = request.ssid;
426    e.vaMask = tr.addrMask;
427    e.va = request.addr & e.vaMask;
428    e.pa = tr.addr & e.vaMask;
429    e.permissions = tr.writable;
430    e.asid = context.asid;
431    e.vmid = context.vmid;
432
433    SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
434    if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
435        alloc = request.isPrefetch ?
436            SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;
437
438    doSemaphoreDown(yield, ifc.mainTLBSem);
439
440    DPRINTF(SMMUv3,
441            "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
442            "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
443
444    ifc.mainTLB->store(e, alloc);
445
446    doSemaphoreUp(ifc.mainTLBSem);
447}
448
449void
450SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
451                                      const TranslResult &tr)
452{
453    assert(tr.fault == FAULT_NONE);
454
455    if (!smmu.tlbEnable)
456        return;
457
458    ARMArchTLB::Entry e;
459    e.valid = true;
460    e.vaMask = tr.addrMask;
461    e.va = request.addr & e.vaMask;
462    e.asid = context.asid;
463    e.vmid = context.vmid;
464    e.pa = tr.addr & e.vaMask;
465    e.permissions = tr.writable;
466
467    doSemaphoreDown(yield, smmu.tlbSem);
468
469    DPRINTF(SMMUv3,
470            "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
471            e.va, e.vaMask, e.pa, e.asid, e.vmid);
472
473    smmu.tlb.store(e);
474
475    doSemaphoreUp(smmu.tlbSem);
476}
477
478bool
479SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
480{
481    if (!smmu.configCacheEnable)
482        return false;
483
484    doSemaphoreDown(yield, smmu.configSem);
485    doDelay(yield, smmu.configLat);
486    const ConfigCache::Entry *e =
487        smmu.configCache.lookup(request.sid, request.ssid);
488    doSemaphoreUp(smmu.configSem);
489
490    if (!e) {
491        DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
492                request.sid, request.ssid);
493
494        return false;
495    }
496
497    DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
498            request.sid, request.ssid, e->ttb0, e->asid);
499
500    tc.stage1Enable = e->stage1_en;
501    tc.stage2Enable = e->stage2_en;
502
503    tc.ttb0 = e->ttb0;
504    tc.ttb1 = e->ttb1;
505    tc.asid = e->asid;
506    tc.httb = e->httb;
507    tc.vmid = e->vmid;
508
509    tc.stage1TranslGranule = e->stage1_tg;
510    tc.stage2TranslGranule = e->stage2_tg;
511
512    return true;
513}
514
515void
516SMMUTranslationProcess::configCacheUpdate(Yield &yield,
517                                          const TranslContext &tc)
518{
519    if (!smmu.configCacheEnable)
520        return;
521
522    ConfigCache::Entry e;
523    e.valid = true;
524    e.sid = request.sid;
525    e.ssid = request.ssid;
526    e.stage1_en = tc.stage1Enable;
527    e.stage2_en = tc.stage2Enable;
528    e.ttb0 = tc.ttb0;
529    e.ttb1 = tc.ttb1;
530    e.asid = tc.asid;
531    e.httb = tc.httb;
532    e.vmid = tc.vmid;
533    e.stage1_tg = tc.stage1TranslGranule;
534    e.stage2_tg = tc.stage2TranslGranule;
535
536    doSemaphoreDown(yield, smmu.configSem);
537
538    DPRINTF(SMMUv3, "Config upd  sid=%#x ssid=%#x\n", e.sid, e.ssid);
539
540    smmu.configCache.store(e);
541
542    doSemaphoreUp(smmu.configSem);
543}
544
545bool
546SMMUTranslationProcess::findConfig(Yield &yield,
547                                   TranslContext &tc,
548                                   TranslResult &tr)
549{
550    tc.stage1Enable = false;
551    tc.stage2Enable = false;
552
553    StreamTableEntry ste;
554    doReadSTE(yield, ste, request.sid);
555
556    switch (ste.dw0.config) {
557        case STE_CONFIG_BYPASS:
558            break;
559
560        case STE_CONFIG_STAGE1_ONLY:
561            tc.stage1Enable = true;
562            break;
563
564        case STE_CONFIG_STAGE2_ONLY:
565            tc.stage2Enable = true;
566            break;
567
568        case STE_CONFIG_STAGE1_AND_2:
569            tc.stage1Enable = true;
570            tc.stage2Enable = true;
571            break;
572
573        default:
574            panic("Bad or unimplemented STE config %d\n",
575                ste.dw0.config);
576    }
577
578
579    // Establish stage 2 context first since
580    // Context Descriptors can be in IPA space.
581    if (tc.stage2Enable) {
582        tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
583        tc.vmid = ste.dw2.s2vmid;
584        tc.stage2TranslGranule = ste.dw2.s2tg;
585    } else {
586        tc.httb = 0xdeadbeef;
587        tc.vmid = 0;
588        tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
589    }
590
591
592    // Now fetch stage 1 config.
593    if (context.stage1Enable) {
594        ContextDescriptor cd;
595        doReadCD(yield, cd, ste, request.sid, request.ssid);
596
597        tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
598        tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
599        tc.asid = cd.dw0.asid;
600        tc.stage1TranslGranule = cd.dw0.tg0;
601    } else {
602        tc.ttb0 = 0xcafebabe;
603        tc.ttb1 = 0xcafed00d;
604        tc.asid = 0;
605        tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
606    }
607
608    return true;
609}
610
void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    // Probe the walk cache for a table entry at the given stage/level.
    // On a hit, walkEntry points at the cached entry; on a miss (or if
    // the walk cache is not configured to hold this level) it is left
    // untouched. Stage 2 log lines are indented for readability.
    const char *indent = stage==2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    // Pick the page table geometry for the granule of this stage.
    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    // Bitmask of levels the walk cache holds for this stage (0 when
    // the walk cache is disabled entirely).
    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit  va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}
651
652void
653SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
654                                        Addr vaMask, Addr pa,
655                                        unsigned stage, unsigned level,
656                                        bool leaf, uint8_t permissions)
657{
658    unsigned walkCacheLevels =
659        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;
660
661    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
662        WalkCache::Entry e;
663        e.valid = true;
664        e.va = va;
665        e.vaMask = vaMask;
666        e.asid = stage==1 ? context.asid : 0;
667        e.vmid = context.vmid;
668        e.stage = stage;
669        e.level = level;
670        e.leaf = leaf;
671        e.pa = pa;
672        e.permissions = permissions;
673
674        doSemaphoreDown(yield, smmu.walkSem);
675
676        DPRINTF(SMMUv3, "%sWalkCache upd  va=%#x mask=%#x asid=%#x vmid=%#x "
677                        "tpa=%#x leaf=%s (S%d, L%d)\n",
678                e.stage==2 ? "  " : "",
679                e.va, e.vaMask, e.asid, e.vmid,
680                e.pa, e.leaf, e.stage, e.level);
681
682        smmu.walkCache.store(e);
683
684        doSemaphoreUp(smmu.walkSem);
685    }
686}
687
688/*
689 * Please note:
690 * This does not deal with the case where stage 1 page size
691 * is larger than stage 2 page size.
692 */
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    // Perform a stage 1 table walk (with nested stage 2 translation of
    // the table pointers when stage 2 is enabled), starting at the
    // given level and table base. Returns the final translation or a
    // fault.
    PageTableOps::pte_t pte = 0;

    // One cycle of walk-pipeline arbitration before issuing fetches.
    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf  = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        // Write permission is only checked on the leaf entry.
        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        // Stage 1 table pointers are IPAs when stage 2 is on; translate
        // them (as a non-final translation) before the next fetch.
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        // Cache the intermediate (non-leaf) table pointer; leaf is
        // always false here because of the break above.
        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault    = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr     = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    // Translate the resulting IPA to a PA and merge permissions when
    // stage 2 is enabled.
    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    // Cache the final leaf translation.
    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}
776
777SMMUTranslationProcess::TranslResult
778SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
779                                   const PageTableOps *pt_ops,
780                                   unsigned level, Addr walkPtr)
781{
782    PageTableOps::pte_t pte;
783
784    doSemaphoreDown(yield, smmu.cycleSem);
785    doDelay(yield, Cycles(1));
786    doSemaphoreUp(smmu.cycleSem);
787
788    for (; level <= pt_ops->lastLevel(); level++) {
789        Addr pte_addr = walkPtr + pt_ops->index(addr, level);
790
791        DPRINTF(SMMUv3, "  Fetching S2 L%d PTE from pa=%#08x\n",
792                level, pte_addr);
793
794        doReadPTE(yield, addr, pte_addr, &pte, 2, level);
795
796        DPRINTF(SMMUv3, "  Got S2 L%d PTE=%#x from pa=%#08x\n",
797                level, pte, pte_addr);
798
799        doSemaphoreDown(yield, smmu.cycleSem);
800        doDelay(yield, Cycles(1));
801        doSemaphoreUp(smmu.cycleSem);
802
803        bool valid = pt_ops->isValid(pte, level);
804        bool leaf  = pt_ops->isLeaf(pte, level);
805
806        if (!valid) {
807            DPRINTF(SMMUv3, "  S2 PTE not valid - fault\n");
808
809            TranslResult tr;
810            tr.fault = FAULT_TRANSLATION;
811            return tr;
812        }
813
814        if (valid && leaf && request.isWrite &&
815            !pt_ops->isWritable(pte, level, true))
816        {
817            DPRINTF(SMMUv3, "  S2 PTE not writable = fault\n");
818
819            TranslResult tr;
820            tr.fault = FAULT_PERMISSION;
821            return tr;
822        }
823
824        walkPtr = pt_ops->nextLevelPointer(pte, level);
825
826        if (final_tr || smmu.walkCacheNonfinalEnable)
827            walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
828                            2, level, leaf,
829                            leaf ? pt_ops->isWritable(pte, level, true) : 0);
830        if (leaf)
831            break;
832    }
833
834    TranslResult tr;
835    tr.fault    = FAULT_NONE;
836    tr.addrMask = pt_ops->pageMask(pte, level);
837    tr.addr     = walkPtr + (addr & ~tr.addrMask);
838    tr.writable = pt_ops->isWritable(pte, level, true);
839
840    return tr;
841}
842
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    // Translate a VA through stage 1 (and nested stage 2 if enabled).
    // First look for the deepest matching walk cache entry so the walk
    // can be resumed partway down; otherwise start a full walk from
    // ttb0 (translating the table base through stage 2 if needed).
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Search from the deepest level upwards for a cached entry.
    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
        level > pt_ops->firstLevel();
        level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            // Leaf hit: the cached entry fully translates the address.
            tr.fault    = FAULT_NONE;
            tr.addr     = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            // Non-leaf hit: resume the walk one level below the entry.
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        // Full walk from ttb0; the table base is an IPA when stage 2
        // is enabled, so translate it first (non-final translation).
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops, pt_ops->firstLevel(),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}
897
898SMMUTranslationProcess::TranslResult
899SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
900{
901    const PageTableOps *pt_ops =
902            smmu.getPageTableOps(context.stage2TranslGranule);
903
904    const IPACache::Entry *ipa_ep = NULL;
905    if (smmu.ipaCacheEnable) {
906        doSemaphoreDown(yield, smmu.ipaSem);
907        doDelay(yield, smmu.ipaLat);
908        ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
909        doSemaphoreUp(smmu.ipaSem);
910    }
911
912    if (ipa_ep) {
913        TranslResult tr;
914        tr.fault    = FAULT_NONE;
915        tr.addr     = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
916        tr.addrMask = ipa_ep->ipaMask;
917        tr.writable = ipa_ep->permissions;
918
919        DPRINTF(SMMUv3, "  IPACache hit  ipa=%#x vmid=%#x pa=%#x\n",
920            addr, context.vmid, tr.addr);
921
922        return tr;
923    } else if (smmu.ipaCacheEnable) {
924        DPRINTF(SMMUv3, "  IPACache miss ipa=%#x vmid=%#x\n",
925                addr, context.vmid);
926    }
927
928    const WalkCache::Entry *walk_ep = NULL;
929    unsigned level = pt_ops->firstLevel();
930
931    if (final_tr || smmu.walkCacheNonfinalEnable) {
932        // Level here is actually (level+1) so we can count down
933        // to 0 using unsigned int.
934        for (level = pt_ops->lastLevel() + 1;
935            level > pt_ops->firstLevel();
936            level--)
937        {
938            walkCacheLookup(yield, walk_ep, addr,
939                            0, context.vmid, 2, level-1);
940
941            if (walk_ep)
942                break;
943        }
944
945        // Correct level (see above).
946        level -= 1;
947    }
948
949    TranslResult tr;
950    if (walk_ep) {
951        if (walk_ep->leaf) {
952            tr.fault    = FAULT_NONE;
953            tr.addr     = walk_ep->pa + (addr & ~walk_ep->vaMask);
954            tr.addrMask = walk_ep->vaMask;
955            tr.writable = walk_ep->permissions;
956        } else {
957            tr = walkStage2(yield, addr, final_tr, pt_ops,
958                            level + 1, walk_ep->pa);
959        }
960    } else {
961        tr = walkStage2(yield, addr, final_tr, pt_ops, pt_ops->firstLevel(),
962                        context.httb);
963    }
964
965    if (tr.fault == FAULT_NONE)
966        DPRINTF(SMMUv3, "  Translated %saddr %#x to paddr %#x\n",
967            context.stage1Enable ? "ip" : "v", addr, tr.addr);
968
969    if (smmu.ipaCacheEnable) {
970        IPACache::Entry e;
971        e.valid = true;
972        e.ipaMask = tr.addrMask;
973        e.ipa = addr & e.ipaMask;
974        e.pa = tr.addr & tr.addrMask;
975        e.permissions = tr.writable;
976        e.vmid = context.vmid;
977
978        doSemaphoreDown(yield, smmu.ipaSem);
979        smmu.ipaCache.store(e);
980        doSemaphoreUp(smmu.ipaSem);
981    }
982
983    return tr;
984}
985
986SMMUTranslationProcess::TranslResult
987SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
988                                            const TranslResult &s2tr) const
989{
990    if (s2tr.fault != FAULT_NONE)
991        return s2tr;
992
993    assert(s1tr.fault == FAULT_NONE);
994
995    TranslResult tr;
996    tr.fault    = FAULT_NONE;
997    tr.addr     = s2tr.addr;
998    tr.addrMask = s1tr.addrMask | s2tr.addrMask;
999    tr.writable = s1tr.writable & s2tr.writable;
1000
1001    return tr;
1002}
1003
1004bool
1005SMMUTranslationProcess::hazard4kCheck()
1006{
1007    Addr addr4k = request.addr & ~0xfffULL;
1008
1009    for (auto it = ifc.duplicateReqs.begin();
1010         it != ifc.duplicateReqs.end();
1011         ++it)
1012    {
1013        Addr other4k = (*it)->request.addr & ~0xfffULL;
1014        if (addr4k == other4k)
1015            return true;
1016    }
1017
1018    return false;
1019}
1020
1021void
1022SMMUTranslationProcess::hazard4kRegister()
1023{
1024    DPRINTF(SMMUv3Hazard, "4kReg:  p=%p a4k=%#x\n",
1025            this, request.addr & ~0xfffULL);
1026
1027    ifc.duplicateReqs.push_back(this);
1028}
1029
void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    // Block this process while an older request in the duplicate-request
    // queue targets the same 4KB page. Only entries queued ahead of us
    // (i.e. before *this* process in the list) are considered.
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        // Scan the queue from the front up to (but not including) our
        // own entry.
        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                // Yield until hazard4kRelease() broadcasts that some
                // request left the queue, then re-scan from scratch.
                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}
1067
1068void
1069SMMUTranslationProcess::hazard4kRelease()
1070{
1071    DPRINTF(SMMUv3Hazard, "4kRel:  p=%p a4k=%#x\n",
1072            this, request.addr & ~0xfffULL);
1073
1074    std::list<SMMUTranslationProcess *>::iterator it;
1075
1076    for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1077        if (*it == this)
1078            break;
1079
1080    if (it == ifc.duplicateReqs.end())
1081        panic("hazard4kRelease: request not found");
1082
1083    ifc.duplicateReqs.erase(it);
1084
1085    doBroadcastSignal(ifc.duplicateReqRemoved);
1086}
1087
1088void
1089SMMUTranslationProcess::hazardIdRegister()
1090{
1091    auto orderId = AMBA::orderId(request.pkt);
1092
1093    DPRINTF(SMMUv3Hazard, "IdReg:  p=%p oid=%d\n", this, orderId);
1094
1095    assert(orderId < SMMU_MAX_TRANS_ID);
1096
1097    std::list<SMMUTranslationProcess *> &depReqs =
1098        request.isWrite ?
1099            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1100    depReqs.push_back(this);
1101}
1102
1103void
1104SMMUTranslationProcess::hazardIdHold(Yield &yield)
1105{
1106    auto orderId = AMBA::orderId(request.pkt);
1107
1108    DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1109
1110    std::list<SMMUTranslationProcess *> &depReqs =
1111        request.isWrite ?
1112            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1113    std::list<SMMUTranslationProcess *>::iterator it;
1114
1115    bool found_hazard;
1116
1117    do {
1118        found_hazard = false;
1119
1120        for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1121            DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1122                    this, orderId, *it);
1123
1124            if (AMBA::orderId((*it)->request.pkt) == orderId) {
1125                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1126                        this, orderId, *it);
1127
1128                doWaitForSignal(yield, ifc.dependentReqRemoved);
1129
1130                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1131                        this, orderId);
1132
1133                // This is to avoid checking *it!=this after doWaitForSignal()
1134                // since it could have been deleted.
1135                found_hazard = true;
1136                break;
1137            }
1138        }
1139    } while (found_hazard);
1140}
1141
1142void
1143SMMUTranslationProcess::hazardIdRelease()
1144{
1145    auto orderId = AMBA::orderId(request.pkt);
1146
1147    DPRINTF(SMMUv3Hazard, "IdRel:  p=%p oid=%d\n", this, orderId);
1148
1149    std::list<SMMUTranslationProcess *> &depReqs =
1150        request.isWrite ?
1151            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1152    std::list<SMMUTranslationProcess *>::iterator it;
1153
1154    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1155        if (*it == this)
1156            break;
1157    }
1158
1159    if (it == depReqs.end())
1160        panic("hazardIdRelease: request not found");
1161
1162    depReqs.erase(it);
1163
1164    doBroadcastSignal(ifc.dependentReqRemoved);
1165}
1166
1167void
1168SMMUTranslationProcess::issuePrefetch(Addr addr)
1169{
1170    if (!smmu.system.isTimingMode())
1171        return;
1172
1173    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
1174        return;
1175
1176    ifc.xlateSlotsRemaining--;
1177
1178    std::string proc_name = csprintf("%sprf", name());
1179    SMMUTranslationProcess *proc =
1180        new SMMUTranslationProcess(proc_name, smmu, ifc);
1181
1182    proc->beginTransaction(
1183            SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
1184    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1185}
1186
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    // Finish a successful translation: model master-port occupancy,
    // return the slots this request held, then forward the packet and
    // (for non-ATS requests) relay the response back to the slave side.
    assert(tr.fault == FAULT_NONE);

    // Writes occupy the master port for one beat per port-width chunk
    // of the payload; everything else counts as a single beat.
    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    // Model contention on the master port: hold the semaphore for the
    // duration of the transfer.
    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    // Record total translation latency and give back the translation
    // slot (and, for non-ATS writes, the write-buffer slots) so stalled
    // slave requests can be retried.
    smmu.translationTimeDist.sample(curTick() - recvTick);
    ifc.xlateSlotsRemaining++;
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        // ATS requests get a translation response directly; convert the
        // packet into a response matching the simulation mode.
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        // Ordinary requests are forwarded downstream on the master port.
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    // Retarget the packet at the translated physical address.
    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    // Hand the action to the SMMU main loop; for non-ATS requests this
    // yield resumes once the downstream response arrives.
    yield(a);

    if (!request.isAtsRequest) {
        // Restore the original (untranslated) address before sending
        // the response back to the requester.
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}
1248
1249void
1250SMMUTranslationProcess::completePrefetch(Yield &yield)
1251{
1252    ifc.xlateSlotsRemaining++;
1253
1254    SMMUAction a;
1255    a.type = ACTION_TERMINATE;
1256    a.pkt = NULL;
1257    a.ifc = &ifc;
1258    a.delay = 0;
1259    yield(a);
1260}
1261
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    // Push an event record onto the in-memory event queue and raise the
    // event-queue MSI.

    // Queue size is encoded (log2) in the low bits of eventq_base.
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK) &
            Q_CONS_PROD_MASK;

    // Full when advancing the producer would meet the consumer.
    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    // Address of the slot the producer index currently points at.
    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
        "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
        event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
        ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    // Write the event record to the queue slot.
    doWrite(yield, event_addr, &ev, sizeof(ev));

    // Signal the event by writing the MSI payload (eventq_irq_cfg1) to
    // the configured MSI address (eventq_irq_cfg0).
    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}
1292
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    // Fetch the Stream Table Entry for stream id `sid`, handling both
    // the linear and the two-level stream table formats configured in
    // strtab_base_cfg. Panics on malformed tables or out-of-range sids.
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        // Two-level format: `split` selects how many low sid bits index
        // the level-2 table; the remaining high bits index level 1.
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        if (split!= 7 && split!=8 && split!=16)
            panic("Invalid stream table split %d", split);

        // Read the level-1 descriptor selected by the high sid bits.
        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);

        // The span field gives the number of valid low sid bits covered
        // by this level-2 table; zero means an invalid descriptor.
        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        // Final STE address within the level-2 table.
        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        // Linear format: the table is indexed directly by sid.
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
1359
1360void
1361SMMUTranslationProcess::doReadCD(Yield &yield,
1362                                 ContextDescriptor &cd,
1363                                 const StreamTableEntry &ste,
1364                                 uint32_t sid, uint32_t ssid)
1365{
1366    Addr cd_addr;
1367
1368    if (ste.dw0.s1cdmax == 0) {
1369        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1370    } else {
1371        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1372        if (ssid >= max_ssid)
1373            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1374
1375        if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1376            ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1377        {
1378            unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1379
1380            uint64_t l2_ptr;
1381            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1382                bits(ssid, 24, split) * sizeof(l2_ptr);
1383
1384            if (context.stage2Enable)
1385                l2_addr = translateStage2(yield, l2_addr, false).addr;
1386
1387            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1388
1389            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1390
1391            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1392
1393            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1394
1395            smmu.cdL1Fetches++;
1396        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1397            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1398        }
1399    }
1400
1401    if (context.stage2Enable)
1402        cd_addr = translateStage2(yield, cd_addr, false).addr;
1403
1404    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1405
1406    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1407
1408    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1409    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1410    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1411    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1412    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1413    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1414    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1415    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1416
1417
1418    if (!cd.dw0.valid)
1419        panic("CD @ %#x not valid\n", cd_addr);
1420
1421    smmu.cdFetches++;
1422}
1423
void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    // Read `size` bytes of configuration data (STE/CD/table pointers)
    // from `addr` into `ptr`. sid/ssid identify the originating stream
    // but are currently unused here — presumably kept in the signature
    // for config-cache or fault-reporting hooks; confirm with callers.
    doRead(yield, addr, ptr, size);
}
1431
void
SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
                                  void *ptr, unsigned stage,
                                  unsigned level)
{
    // Read one page table entry from memory into `ptr`, aligning the
    // given address down to the PTE size first. va, stage and level
    // describe the walk position but are currently unused here.
    size_t pte_size = sizeof(PageTableOps::pte_t);

    Addr mask = pte_size - 1;
    Addr base = addr & ~mask;

    doRead(yield, base, ptr, pte_size);
}
1444