/*
 * Copyright (c) 2013, 2018-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stan Czerniawski
 */

#include "dev/arm/smmu_v3.hh"

#include <cstddef>
#include <cstring>

#include "base/bitfield.hh"
#include "base/cast.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Checkpoint.hh"
#include "debug/SMMUv3.hh"
#include "dev/arm/smmu_v3_transl.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"

SMMUv3::SMMUv3(SMMUv3Params *params) :
    ClockedObject(params),
    system(*params->system),
    masterId(params->system->getMasterId(this)),
    masterPort(name() + ".master", *this),
    masterTableWalkPort(name() + ".master_walker", *this),
    controlPort(name() + ".control", *this, params->reg_map),
    tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
    configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
    ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy),
    walkCache({ { params->walk_S1L0, params->walk_S1L1,
                  params->walk_S1L2, params->walk_S1L3,
                  params->walk_S2L0, params->walk_S2L1,
                  params->walk_S2L2, params->walk_S2L3 } },
              params->walk_assoc, params->walk_policy),
    tlbEnable(params->tlb_enable),
    configCacheEnable(params->cfg_enable),
    ipaCacheEnable(params->ipa_enable),
    walkCacheEnable(params->walk_enable),
    tableWalkPortEnable(false),
    walkCacheNonfinalEnable(params->wc_nonfinal_enable),
    walkCacheS1Levels(params->wc_s1_levels),
    walkCacheS2Levels(params->wc_s2_levels),
    masterPortWidth(params->master_port_width),
    tlbSem(params->tlb_slots),
    ifcSmmuSem(1),
    smmuIfcSem(1),
    configSem(params->cfg_slots),
    ipaSem(params->ipa_slots),
    walkSem(params->walk_slots),
    masterPortSem(1),
    transSem(params->xlate_slots),
    ptwSem(params->ptw_slots),
    cycleSem(1),
    tlbLat(params->tlb_lat),
    ifcSmmuLat(params->ifc_smmu_lat),
    smmuIfcLat(params->smmu_ifc_lat),
    configLat(params->cfg_lat),
    ipaLat(params->ipa_lat),
    walkLat(params->walk_lat),
    slaveInterfaces(params->slave_interfaces),
    commandExecutor(name() + ".cmd_exec", *this),
    regsMap(params->reg_map),
    processCommandsEvent(this)
{
    fatal_if(regsMap.size() != SMMU_REG_SIZE,
        "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
        regsMap.size(), SMMU_REG_SIZE);

    // Init smmu registers to 0
    memset(&regs, 0, sizeof(regs));

    // Setup RO ID registers
    regs.idr0 = params->smmu_idr0;
    regs.idr1 = params->smmu_idr1;
    regs.idr2 = params->smmu_idr2;
    regs.idr3 = params->smmu_idr3;
    regs.idr4 = params->smmu_idr4;
    regs.idr5 = params->smmu_idr5;
    regs.iidr = params->smmu_iidr;
    regs.aidr = params->smmu_aidr;

    // TODO: At the moment it is possible to set the ID registers to hold
    // any value. It would be nice to have a sanity check here at
    // construction time in case some ID registers are programmed to hold
    // disallowed values or if there are configuration conflicts.
    warn("SMMUv3 IDx register values unchecked\n");
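
    // A minimal sketch of such a check, assuming hypothetical
    // IDR0_S1P_MASK and IDR0_S2P_MASK field masks (not defined in this
    // model), that would reject a configuration advertising neither
    // stage 1 nor stage 2 translation support:
    //
    //     fatal_if(!(regs.idr0 & IDR0_S1P_MASK) &&
    //              !(regs.idr0 & IDR0_S2P_MASK),
    //              "SMMUv3 IDR0 advertises no translation stages\n");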

    for (auto ifc : slaveInterfaces)
        ifc->setSMMU(this);
}

bool
SMMUv3::masterRecvTimingResp(PacketPtr pkt)
{
    DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
        pkt->getAddr(), pkt->getSize());

    // @todo: We need to pay for this and not just zero it out
    pkt->headerDelay = pkt->payloadDelay = 0;

    SMMUProcess *proc =
        safe_cast<SMMUProcess *>(pkt->popSenderState());

    runProcessTiming(proc, pkt);

    return true;
}

void
SMMUv3::masterRecvReqRetry()
{
    assert(!packetsToRetry.empty());

    while (!packetsToRetry.empty()) {
        SMMUAction a = packetsToRetry.front();

        assert(a.type == ACTION_SEND_REQ || a.type == ACTION_SEND_REQ_FINAL);

        DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
            a.pkt->getAddr(), a.pkt->getSize());

        if (!masterPort.sendTimingReq(a.pkt))
            break;

        packetsToRetry.pop();

        /*
         * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
         * on the master interface; this means that we no longer hold on to
         * that transaction and therefore can accept a new one.
         * If the slave port was stalled then unstall it (send retry).
         */
        if (a.type == ACTION_SEND_REQ_FINAL)
            scheduleSlaveRetries();
    }
}

bool
SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
{
    DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
        pkt->getAddr(), pkt->getSize());

    // @todo: We need to pay for this and not just zero it out
    pkt->headerDelay = pkt->payloadDelay = 0;

    SMMUProcess *proc =
        safe_cast<SMMUProcess *>(pkt->popSenderState());

    runProcessTiming(proc, pkt);

    return true;
}

void
SMMUv3::masterTableWalkRecvReqRetry()
{
    assert(tableWalkPortEnable);
    assert(!packetsTableWalkToRetry.empty());

    while (!packetsTableWalkToRetry.empty()) {
        SMMUAction a = packetsTableWalkToRetry.front();

        assert(a.type == ACTION_SEND_REQ);

        DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
            a.pkt->getAddr(), a.pkt->getSize());

        if (!masterTableWalkPort.sendTimingReq(a.pkt))
            break;

        packetsTableWalkToRetry.pop();
    }
}

void
SMMUv3::scheduleSlaveRetries()
{
    for (auto ifc : slaveInterfaces) {
        ifc->scheduleDeviceRetry();
    }
}

SMMUAction
SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt)
{
    if (system.isAtomicMode()) {
        return runProcessAtomic(proc, pkt);
    } else if (system.isTimingMode()) {
        return runProcessTiming(proc, pkt);
    } else {
        panic("Not in timing or atomic mode!");
    }
}

SMMUAction
SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action;
    Tick delay = 0;
    bool finished = false;

    do {
        action = proc->run(pkt);

        switch (action.type) {
            case ACTION_SEND_REQ:
                // Send an MMU-initiated request on the table walk port if
                // it is enabled. Otherwise, fall through and handle it the
                // same way as the final ACTION_SEND_REQ_FINAL request.
                if (tableWalkPortEnable) {
                    delay += masterTableWalkPort.sendAtomic(action.pkt);
                    pkt = action.pkt;
                    break;
                }
                M5_FALLTHROUGH;
            case ACTION_SEND_REQ_FINAL:
                delay += masterPort.sendAtomic(action.pkt);
                pkt = action.pkt;
                break;

            case ACTION_SEND_RESP:
            case ACTION_SEND_RESP_ATS:
            case ACTION_SLEEP:
                finished = true;
                break;

            case ACTION_DELAY:
                delay += action.delay;
                break;

            case ACTION_TERMINATE:
                panic("ACTION_TERMINATE in atomic mode\n");

            default:
                panic("Unknown action\n");
        }
    } while (!finished);

    action.delay = delay;

    return action;
}

SMMUAction
SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action = proc->run(pkt);

    switch (action.type) {
        case ACTION_SEND_REQ:
            // Send an MMU-initiated request on the table walk port if it
            // is enabled. Otherwise, fall through and handle it the same
            // way as the final ACTION_SEND_REQ_FINAL request.
            if (tableWalkPortEnable) {
                action.pkt->pushSenderState(proc);

                DPRINTF(SMMUv3, "[t] master HWTW req  addr=%#x size=%#x\n",
                        action.pkt->getAddr(), action.pkt->getSize());

                if (packetsTableWalkToRetry.empty()
                        && masterTableWalkPort.sendTimingReq(action.pkt)) {
                    scheduleSlaveRetries();
                } else {
                    DPRINTF(SMMUv3, "[t] master HWTW req  needs retry,"
                            " qlen=%d\n", packetsTableWalkToRetry.size());
                    packetsTableWalkToRetry.push(action);
                }

                break;
            }
            M5_FALLTHROUGH;
        case ACTION_SEND_REQ_FINAL:
            action.pkt->pushSenderState(proc);

            DPRINTF(SMMUv3, "[t] master req  addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            if (packetsToRetry.empty() &&
                masterPort.sendTimingReq(action.pkt)) {
                scheduleSlaveRetries();
            } else {
                DPRINTF(SMMUv3, "[t] master req  needs retry, qlen=%d\n",
                        packetsToRetry.size());
                packetsToRetry.push(action);
            }

            break;

        case ACTION_SEND_RESP:
            // @todo: We need to pay for this and not just zero it out
            action.pkt->headerDelay = action.pkt->payloadDelay = 0;

            DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
                    action.pkt->getAddr(),
                    action.pkt->getSize());

            assert(action.ifc);
            action.ifc->schedTimingResp(action.pkt);

            delete proc;
            break;

        case ACTION_SEND_RESP_ATS:
            // @todo: We need to pay for this and not just zero it out
            action.pkt->headerDelay = action.pkt->payloadDelay = 0;

            DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            assert(action.ifc);
            action.ifc->schedAtsTimingResp(action.pkt);

            delete proc;
            break;

        case ACTION_DELAY:
        case ACTION_SLEEP:
            break;

        case ACTION_TERMINATE:
            delete proc;
            break;

        default:
            panic("Unknown action\n");
    }

    return action;
}

void
SMMUv3::processCommands()
{
    DPRINTF(SMMUv3, "processCommands()\n");

    if (system.isAtomicMode()) {
        SMMUAction a = runProcessAtomic(&commandExecutor, NULL);
        (void) a;
    } else if (system.isTimingMode()) {
        if (!commandExecutor.isBusy())
            runProcessTiming(&commandExecutor, NULL);
    } else {
        panic("Not in timing or atomic mode!");
    }
}

void
SMMUv3::processCommand(const SMMUCommand &cmd)
{
    switch (cmd.dw0.type) {
        case CMD_PRF_CONFIG:
            DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
            break;

        case CMD_PRF_ADDR:
            DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
            break;

        case CMD_CFGI_STE: {
            DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
            configCache.invalidateSID(cmd.dw0.sid);

            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
                slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
            }
            break;
        }

        case CMD_CFGI_STE_RANGE: {
            const auto range = cmd.dw1.range;
            if (range == 31) {
                // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with
                // range = 31
                DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
                configCache.invalidateAll();

                for (auto slave_interface : slaveInterfaces) {
                    slave_interface->microTLB->invalidateAll();
                    slave_interface->mainTLB->invalidateAll();
                }
            } else {
                DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
                const auto start_sid = cmd.dw0.sid & ~((1 << (range + 1)) - 1);
                const auto end_sid = start_sid + (1 << (range + 1)) - 1;
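                // Worked example: with sid = 0x1235 and range = 2, the low
                // (range + 1) = 3 bits of the SID are masked off, giving
                // start_sid = 0x1230 and end_sid = 0x1237, i.e. a block of
                // 2^(range + 1) = 8 consecutive StreamIDs is invalidated.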
                for (auto sid = start_sid; sid <= end_sid; sid++) {
                    configCache.invalidateSID(sid);

                    for (auto slave_interface : slaveInterfaces) {
                        slave_interface->microTLB->invalidateSID(sid);
                        slave_interface->mainTLB->invalidateSID(sid);
                    }
                }
            }
            break;
        }

        case CMD_CFGI_CD: {
            DPRINTF(SMMUv3, "CMD_CFGI_CD sid=%#x ssid=%#x\n",
                    cmd.dw0.sid, cmd.dw0.ssid);
            configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);

            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateSSID(
                    cmd.dw0.sid, cmd.dw0.ssid);
                slave_interface->mainTLB->invalidateSSID(
                    cmd.dw0.sid, cmd.dw0.ssid);
            }
            break;
        }

        case CMD_CFGI_CD_ALL: {
            DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
            configCache.invalidateSID(cmd.dw0.sid);

            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
                slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
            }
            break;
        }

        case CMD_TLBI_NH_ALL: {
            DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
                slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
            }
            tlb.invalidateVMID(cmd.dw0.vmid);
            walkCache.invalidateVMID(cmd.dw0.vmid);
            break;
        }

        case CMD_TLBI_NH_ASID: {
            DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
                    cmd.dw0.asid, cmd.dw0.vmid);
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateASID(
                    cmd.dw0.asid, cmd.dw0.vmid);
                slave_interface->mainTLB->invalidateASID(
                    cmd.dw0.asid, cmd.dw0.vmid);
            }
            tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
            walkCache.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
            break;
        }

        case CMD_TLBI_NH_VAA: {
            const Addr addr = cmd.addr();
            DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
                    addr, cmd.dw0.vmid);
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateVAA(
                    addr, cmd.dw0.vmid);
                slave_interface->mainTLB->invalidateVAA(
                    addr, cmd.dw0.vmid);
            }
            tlb.invalidateVAA(addr, cmd.dw0.vmid);
            const bool leaf_only = cmd.dw1.leaf ? true : false;
            walkCache.invalidateVAA(addr, cmd.dw0.vmid, leaf_only);
            break;
        }

        case CMD_TLBI_NH_VA: {
            const Addr addr = cmd.addr();
            DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
                    addr, cmd.dw0.asid, cmd.dw0.vmid);
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateVA(
                    addr, cmd.dw0.asid, cmd.dw0.vmid);
                slave_interface->mainTLB->invalidateVA(
                    addr, cmd.dw0.asid, cmd.dw0.vmid);
            }
            tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
            const bool leaf_only = cmd.dw1.leaf ? true : false;
            walkCache.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid,
                                   leaf_only);
            break;
        }

        case CMD_TLBI_S2_IPA: {
            const Addr addr = cmd.addr();
            DPRINTF(SMMUv3, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n",
                    addr, cmd.dw0.vmid);
            // This does not invalidate TLBs containing
            // combined Stage1 + Stage2 translations, as per the spec.
            ipaCache.invalidateIPA(addr, cmd.dw0.vmid);

            if (!cmd.dw1.leaf)
                walkCache.invalidateVMID(cmd.dw0.vmid);
            break;
        }

        case CMD_TLBI_S12_VMALL: {
            DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
                slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
            }
            tlb.invalidateVMID(cmd.dw0.vmid);
            ipaCache.invalidateVMID(cmd.dw0.vmid);
            walkCache.invalidateVMID(cmd.dw0.vmid);
            break;
        }

        case CMD_TLBI_NSNH_ALL: {
            DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
            for (auto slave_interface : slaveInterfaces) {
                slave_interface->microTLB->invalidateAll();
                slave_interface->mainTLB->invalidateAll();
            }
            tlb.invalidateAll();
            ipaCache.invalidateAll();
            walkCache.invalidateAll();
            break;
        }

        case CMD_RESUME:
            DPRINTF(SMMUv3, "CMD_RESUME\n");
            panic("resume unimplemented");
            break;

        default:
            warn("Unimplemented command %#x\n", cmd.dw0.type);
            break;
    }
}

const PageTableOps*
SMMUv3::getPageTableOps(uint8_t trans_granule)
{
    static V8PageTableOps4k  ptOps4k;
    static V8PageTableOps16k ptOps16k;
    static V8PageTableOps64k ptOps64k;

    switch (trans_granule) {
    case TRANS_GRANULE_4K:  return &ptOps4k;
    case TRANS_GRANULE_16K: return &ptOps16k;
    case TRANS_GRANULE_64K: return &ptOps64k;
    default:
        panic("Unknown translation granule size %d", trans_granule);
    }
}

Tick
SMMUv3::readControl(PacketPtr pkt)
{
    DPRINTF(SMMUv3, "readControl:  addr=%08x size=%d\n",
            pkt->getAddr(), pkt->getSize());

    int offset = pkt->getAddr() - regsMap.start();
    assert(offset >= 0 && offset < SMMU_REG_SIZE);

    if (inSecureBlock(offset)) {
        warn("smmu: secure registers (0x%x) are not implemented\n",
             offset);
    }

    auto reg_ptr = regs.data + offset;
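
    // For example, a 4-byte read at address
    // regsMap.start() + offsetof(SMMURegs, cr0) yields
    // offset == offsetof(SMMURegs, cr0), so reg_ptr points at regs.cr0
    // through the regs.data byte view and the current CR0 value is
    // returned to the requester in little-endian byte order.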

    switch (pkt->getSize()) {
      case sizeof(uint32_t):
        pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
        break;
      case sizeof(uint64_t):
        pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
        break;
      default:
        panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
        break;
    }

    pkt->makeAtomicResponse();

    return 0;
}

Tick
SMMUv3::writeControl(PacketPtr pkt)
{
    int offset = pkt->getAddr() - regsMap.start();
    assert(offset >= 0 && offset < SMMU_REG_SIZE);

    DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
            pkt->getAddr(), pkt->getSize(),
            pkt->getSize() == sizeof(uint64_t) ?
            pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());

    switch (offset) {
        case offsetof(SMMURegs, cr0):
            assert(pkt->getSize() == sizeof(uint32_t));
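            // In this model a CR0 update takes effect immediately: the
            // written value is mirrored into CR0ACK below, so software
            // polling CR0ACK sees the update as already acknowledged.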
            regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
            break;

        case offsetof(SMMURegs, cr1):
        case offsetof(SMMURegs, cr2):
        case offsetof(SMMURegs, strtab_base_cfg):
        case offsetof(SMMURegs, eventq_cons):
        case offsetof(SMMURegs, eventq_irq_cfg1):
        case offsetof(SMMURegs, priq_cons):
            assert(pkt->getSize() == sizeof(uint32_t));
            *reinterpret_cast<uint32_t *>(regs.data + offset) =
                pkt->getLE<uint32_t>();
            break;

        case offsetof(SMMURegs, cmdq_cons):
            assert(pkt->getSize() == sizeof(uint32_t));
            if (regs.cr0 & CR0_CMDQEN_MASK) {
                warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n");
            } else {
                *reinterpret_cast<uint32_t *>(regs.data + offset) =
                    pkt->getLE<uint32_t>();
            }
            break;

        case offsetof(SMMURegs, cmdq_prod):
            assert(pkt->getSize() == sizeof(uint32_t));
            *reinterpret_cast<uint32_t *>(regs.data + offset) =
                pkt->getLE<uint32_t>();
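            // A write to CMDQ_PROD means new commands may have been posted,
            // so schedule command processing for the next cycle.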
            schedule(processCommandsEvent, nextCycle());
            break;

        case offsetof(SMMURegs, strtab_base):
        case offsetof(SMMURegs, eventq_irq_cfg0):
            assert(pkt->getSize() == sizeof(uint64_t));
            *reinterpret_cast<uint64_t *>(regs.data + offset) =
                pkt->getLE<uint64_t>();
            break;

        case offsetof(SMMURegs, cmdq_base):
            assert(pkt->getSize() == sizeof(uint64_t));
            if (regs.cr0 & CR0_CMDQEN_MASK) {
                warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n");
            } else {
                *reinterpret_cast<uint64_t *>(regs.data + offset) =
                    pkt->getLE<uint64_t>();
                regs.cmdq_cons = 0;
                regs.cmdq_prod = 0;
            }
            break;

        case offsetof(SMMURegs, eventq_base):
            assert(pkt->getSize() == sizeof(uint64_t));
            *reinterpret_cast<uint64_t *>(regs.data + offset) =
                pkt->getLE<uint64_t>();
            regs.eventq_cons = 0;
            regs.eventq_prod = 0;
            break;

        case offsetof(SMMURegs, priq_base):
            assert(pkt->getSize() == sizeof(uint64_t));
            *reinterpret_cast<uint64_t *>(regs.data + offset) =
                pkt->getLE<uint64_t>();
            regs.priq_cons = 0;
            regs.priq_prod = 0;
            break;

        default:
            if (inSecureBlock(offset)) {
                warn("smmu: secure registers (0x%x) are not implemented\n",
                     offset);
            } else {
                warn("smmu: write to read-only/undefined register at 0x%x\n",
                     offset);
            }
    }

    pkt->makeAtomicResponse();

    return 0;
}

bool
SMMUv3::inSecureBlock(uint32_t offs) const
{
    return offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ;
}

void
SMMUv3::init()
{
    // make sure the master port is connected
    if (!masterPort.isConnected())
        fatal("Master port is not connected.\n");

    // If the second master port is connected for the table walks, enable
    // the mode to send table walks through this port instead
    if (masterTableWalkPort.isConnected())
        tableWalkPortEnable = true;

    // notify the master side of our address ranges
    for (auto ifc : slaveInterfaces) {
        ifc->sendRange();
    }

    if (controlPort.isConnected())
        controlPort.sendRangeChange();
}

void
SMMUv3::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    for (size_t i = 0; i < slaveInterfaces.size(); i++) {
        slaveInterfaces[i]->microTLB->regStats(
            csprintf("%s.utlb%d", name(), i));
        slaveInterfaces[i]->mainTLB->regStats(
            csprintf("%s.maintlb%d", name(), i));
    }

    tlb.regStats(name() + ".tlb");
    configCache.regStats(name() + ".cfg");
    ipaCache.regStats(name() + ".ipa");
    walkCache.regStats(name() + ".walk");

    steL1Fetches
        .name(name() + ".steL1Fetches")
        .desc("STE L1 fetches")
        .flags(pdf);

    steFetches
        .name(name() + ".steFetches")
        .desc("STE fetches")
        .flags(pdf);

    cdL1Fetches
        .name(name() + ".cdL1Fetches")
        .desc("CD L1 fetches")
        .flags(pdf);

    cdFetches
        .name(name() + ".cdFetches")
        .desc("CD fetches")
        .flags(pdf);

    translationTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".translationTimeDist")
        .desc("Time to translate address")
        .flags(pdf);

    ptwTimeDist
        .init(0, 2000000, 2000)
        .name(name() + ".ptwTimeDist")
        .desc("Time to walk page tables")
        .flags(pdf);
}

DrainState
SMMUv3::drain()
{
    // Wait until the Command Executor is not busy
    if (commandExecutor.isBusy()) {
        return DrainState::Draining;
    }
    return DrainState::Drained;
}

void
SMMUv3::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing SMMUv3\n");

    SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
}

void
SMMUv3::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing SMMUv3\n");

    UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
}

Port&
SMMUv3::getPort(const std::string &name, PortID id)
{
    if (name == "master") {
        return masterPort;
    } else if (name == "master_walker") {
        return masterTableWalkPort;
    } else if (name == "control") {
        return controlPort;
    } else {
        return ClockedObject::getPort(name, id);
    }
}

SMMUv3*
SMMUv3Params::create()
{
    return new SMMUv3(this);
}