smmu_v3.cc (14064:870553bad072) smmu_v3.cc (14098:f4b9024d1a96)
1/*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Stan Czerniawski
38 */
39
40#include "dev/arm/smmu_v3.hh"
41
42#include <cstddef>
43#include <cstring>
44
45#include "base/bitfield.hh"
46#include "base/cast.hh"
47#include "base/logging.hh"
48#include "base/trace.hh"
49#include "base/types.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/SMMUv3.hh"
52#include "dev/arm/smmu_v3_transl.hh"
53#include "mem/packet_access.hh"
54#include "sim/system.hh"
55
// Construct the SMMUv3 model. All cache geometries, semaphore slot
// counts and pipeline latencies come from the Python-side parameter
// object; the dedicated table-walk port starts disabled.
SMMUv3::SMMUv3(SMMUv3Params *params) :
    MemObject(params),
    system(*params->system),
    masterId(params->system->getMasterId(this)),
    masterPort(name() + ".master", *this),
    masterTableWalkPort(name() + ".master_walker", *this),
    controlPort(name() + ".control", *this, params->reg_map),
    tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
    configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
    ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy),
    // Walk cache sizing: one entry count per stage (S1/S2) and
    // translation table level (L0..L3).
    walkCache({ { params->walk_S1L0, params->walk_S1L1,
                  params->walk_S1L2, params->walk_S1L3,
                  params->walk_S2L0, params->walk_S2L1,
                  params->walk_S2L2, params->walk_S2L3 } },
              params->walk_assoc, params->walk_policy),
    tlbEnable(params->tlb_enable),
    configCacheEnable(params->cfg_enable),
    ipaCacheEnable(params->ipa_enable),
    walkCacheEnable(params->walk_enable),
    tableWalkPortEnable(false),
    walkCacheNonfinalEnable(params->wc_nonfinal_enable),
    walkCacheS1Levels(params->wc_s1_levels),
    walkCacheS2Levels(params->wc_s2_levels),
    masterPortWidth(params->master_port_width),
    tlbSem(params->tlb_slots),
    ifcSmmuSem(1),
    smmuIfcSem(1),
    configSem(params->cfg_slots),
    ipaSem(params->ipa_slots),
    walkSem(params->walk_slots),
    masterPortSem(1),
    transSem(params->xlate_slots),
    ptwSem(params->ptw_slots),
    cycleSem(1),
    tlbLat(params->tlb_lat),
    ifcSmmuLat(params->ifc_smmu_lat),
    smmuIfcLat(params->smmu_ifc_lat),
    configLat(params->cfg_lat),
    ipaLat(params->ipa_lat),
    walkLat(params->walk_lat),
    slaveInterfaces(params->slave_interfaces),
    commandExecutor(name() + ".cmd_exec", *this),
    regsMap(params->reg_map),
    processCommandsEvent(this)
{
    // The memory-mapped register region must exactly match the size of
    // the modelled SMMU register file.
    fatal_if(regsMap.size() != SMMU_REG_SIZE,
        "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
        regsMap.size(), SMMU_REG_SIZE);

    // Init smmu registers to 0
    memset(&regs, 0, sizeof(regs));

    // Setup RO ID registers
    regs.idr0 = params->smmu_idr0;
    regs.idr1 = params->smmu_idr1;
    regs.idr2 = params->smmu_idr2;
    regs.idr3 = params->smmu_idr3;
    regs.idr4 = params->smmu_idr4;
    regs.idr5 = params->smmu_idr5;
    regs.iidr = params->smmu_iidr;
    regs.aidr = params->smmu_aidr;

    // TODO: At the moment it is possible to set the ID registers to hold
    // any value. It would be nice to have a sanity check here at
    // construction time in case some ID registers are programmed to
    // store disallowed values or if there are configuration conflicts.
    warn("SMMUv3 IDx register values unchecked\n");

    // Give every slave (device-side) interface a back-pointer to us.
    for (auto ifc : slaveInterfaces)
        ifc->setSMMU(this);
}
127
128bool
129SMMUv3::masterRecvTimingResp(PacketPtr pkt)
130{
131 DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
132 pkt->getAddr(), pkt->getSize());
133
134 // @todo: We need to pay for this and not just zero it out
135 pkt->headerDelay = pkt->payloadDelay = 0;
136
137 SMMUProcess *proc =
138 safe_cast<SMMUProcess *>(pkt->popSenderState());
139
140 runProcessTiming(proc, pkt);
141
142 return true;
143}
144
145void
146SMMUv3::masterRecvReqRetry()
147{
148 assert(!packetsToRetry.empty());
149
150 while (!packetsToRetry.empty()) {
151 SMMUAction a = packetsToRetry.front();
152
153 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
154
155 DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
156 a.pkt->getAddr(), a.pkt->getSize());
157
158 if (!masterPort.sendTimingReq(a.pkt))
159 break;
160
161 packetsToRetry.pop();
162
163 /*
164 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
165 * on the master interface; this means that we no longer hold on to
166 * that transaction and therefore can accept a new one.
167 * If the slave port was stalled then unstall it (send retry).
168 */
169 if (a.type == ACTION_SEND_REQ_FINAL)
170 scheduleSlaveRetries();
171 }
172}
173
174bool
175SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
176{
177 DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
178 pkt->getAddr(), pkt->getSize());
179
180 // @todo: We need to pay for this and not just zero it out
181 pkt->headerDelay = pkt->payloadDelay = 0;
182
183 SMMUProcess *proc =
184 safe_cast<SMMUProcess *>(pkt->popSenderState());
185
186 runProcessTiming(proc, pkt);
187
188 return true;
189}
190
191void
192SMMUv3::masterTableWalkRecvReqRetry()
193{
194 assert(tableWalkPortEnable);
195 assert(!packetsTableWalkToRetry.empty());
196
197 while (!packetsTableWalkToRetry.empty()) {
198 SMMUAction a = packetsTableWalkToRetry.front();
199
200 assert(a.type==ACTION_SEND_REQ);
201
202 DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
203 a.pkt->getAddr(), a.pkt->getSize());
204
205 if (!masterTableWalkPort.sendTimingReq(a.pkt))
206 break;
207
208 packetsTableWalkToRetry.pop();
209 }
210}
211
212void
213SMMUv3::scheduleSlaveRetries()
214{
215 for (auto ifc : slaveInterfaces) {
216 ifc->scheduleDeviceRetry();
217 }
218}
219
220SMMUAction
221SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt)
222{
223 if (system.isAtomicMode()) {
224 return runProcessAtomic(proc, pkt);
225 } else if (system.isTimingMode()) {
226 return runProcessTiming(proc, pkt);
227 } else {
228 panic("Not in timing or atomic mode!");
229 }
230}
231
/**
 * Run a process to completion in atomic mode, accumulating the latency
 * of every memory request and explicit delay it generates. The process
 * is stepped repeatedly until it produces a terminating action
 * (a response or a sleep).
 */
SMMUAction
SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action;
    Tick delay = 0;
    bool finished = false;

    do {
        action = proc->run(pkt);

        switch (action.type) {
        case ACTION_SEND_REQ:
            // Send an MMU initiated request on the table walk port if it is
            // enabled. Otherwise, fall through and handle same as the final
            // ACTION_SEND_REQ_FINAL request.
            if (tableWalkPortEnable) {
                delay += masterTableWalkPort.sendAtomic(action.pkt);
                // Feed the completed packet back into the process on
                // the next loop iteration.
                pkt = action.pkt;
                break;
            }
            M5_FALLTHROUGH;
        case ACTION_SEND_REQ_FINAL:
            delay += masterPort.sendAtomic(action.pkt);
            pkt = action.pkt;
            break;

        case ACTION_SEND_RESP:
        case ACTION_SEND_RESP_ATS:
        case ACTION_SLEEP:
            // The process produced its final result (or went to
            // sleep); stop stepping it.
            finished = true;
            break;

        case ACTION_DELAY:
            // Pure modelled latency with no memory access attached.
            delay += action.delay;
            break;

        case ACTION_TERMINATE:
            panic("ACTION_TERMINATE in atomic mode\n");

        default:
            panic("Unknown action\n");
        }
    } while (!finished);

    // Report the total accumulated latency to the caller.
    action.delay = delay;

    return action;
}
280
/**
 * Advance a process by one step in timing mode and carry out the single
 * action it returns. Requests are sent on the appropriate master port
 * (or queued for a later retry); responses are scheduled on the slave
 * interface and the process is destroyed once it has produced its
 * final result.
 */
SMMUAction
SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action = proc->run(pkt);

    switch (action.type) {
    case ACTION_SEND_REQ:
        // Send an MMU initiated request on the table walk port if it is
        // enabled. Otherwise, fall through and handle same as the final
        // ACTION_SEND_REQ_FINAL request.
        if (tableWalkPortEnable) {
            // Stash the process in the packet so the response handler
            // can resume it later.
            action.pkt->pushSenderState(proc);

            DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            // Only send directly if nothing is already queued for
            // retry; otherwise requests could be reordered.
            if (packetsTableWalkToRetry.empty()
                && masterTableWalkPort.sendTimingReq(action.pkt)) {
                scheduleSlaveRetries();
            } else {
                DPRINTF(SMMUv3, "[t] master HWTW req needs retry,"
                        " qlen=%d\n", packetsTableWalkToRetry.size());
                packetsTableWalkToRetry.push(action);
            }

            break;
        }
        M5_FALLTHROUGH;
    case ACTION_SEND_REQ_FINAL:
        action.pkt->pushSenderState(proc);

        DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        // Preserve ordering: send only if the retry queue is empty and
        // the downstream port accepts the packet.
        if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) {
            scheduleSlaveRetries();
        } else {
            DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n",
                    packetsToRetry.size());
            packetsToRetry.push(action);
        }

        break;

    case ACTION_SEND_RESP:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(),
                action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedTimingResp(action.pkt);

        // The translation is complete; the process is no longer needed.
        delete proc;
        break;

    case ACTION_SEND_RESP_ATS:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedAtsTimingResp(action.pkt);

        delete proc;
        break;

    case ACTION_DELAY:
    case ACTION_SLEEP:
        // Nothing to do: the process either rescheduled itself (delay)
        // or waits to be woken up (sleep).
        break;

    case ACTION_TERMINATE:
        delete proc;
        break;

    default:
        panic("Unknown action\n");
    }

    return action;
}
366
367void
368SMMUv3::processCommands()
369{
370 DPRINTF(SMMUv3, "processCommands()\n");
371
372 if (system.isAtomicMode()) {
373 SMMUAction a = runProcessAtomic(&commandExecutor, NULL);
374 (void) a;
375 } else if (system.isTimingMode()) {
376 if (!commandExecutor.isBusy())
377 runProcessTiming(&commandExecutor, NULL);
378 } else {
379 panic("Not in timing or atomic mode!");
380 }
381}
382
/**
 * Execute a single SMMU command. Invalidation commands flush the
 * matching entries from the per-interface micro/main TLBs and from the
 * shared TLB, configuration, IPA and walk caches.
 *
 * cmd.data[] carries the command operands (SID/SSID or VA/ASID/VMID,
 * depending on the command type).
 */
void
SMMUv3::processCommand(const SMMUCommand &cmd)
{
    switch (cmd.type) {
    case CMD_PRF_CONFIG:
        // Prefetch hints are not modelled.
        DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
        break;

    case CMD_PRF_ADDR:
        DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
        break;

    case CMD_INV_STE:
        // Invalidate the cached Stream Table Entry for one stream ID.
        DPRINTF(SMMUv3, "CMD_INV_STE sid=%#x\n", cmd.data[0]);
        configCache.invalidateSID(cmd.data[0]);
        break;

    case CMD_INV_CD:
        // Invalidate one Context Descriptor (SID + SSID pair).
        DPRINTF(SMMUv3, "CMD_INV_CD sid=%#x ssid=%#x\n",
                cmd.data[0], cmd.data[1]);
        configCache.invalidateSSID(cmd.data[0], cmd.data[1]);
        break;

    case CMD_INV_CD_ALL:
        // Invalidate every Context Descriptor under a stream ID.
        DPRINTF(SMMUv3, "CMD_INV_CD_ALL sid=%#x\n", cmd.data[0]);
        configCache.invalidateSID(cmd.data[0]);
        break;

    case CMD_INV_ALL:
        DPRINTF(SMMUv3, "CMD_INV_ALL\n");
        configCache.invalidateAll();
        break;

    case CMD_TLBI_ALL:
        // Flush every translation structure in the model.
        DPRINTF(SMMUv3, "CMD_TLBI_ALL\n");
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateAll();
            slave_interface->mainTLB->invalidateAll();
        }
        tlb.invalidateAll();
        ipaCache.invalidateAll();
        walkCache.invalidateAll();
        break;

    case CMD_TLBI_ASID:
        // Invalidate by ASID (data[0]) within a VMID (data[1]).
        DPRINTF(SMMUv3, "CMD_TLBI_ASID asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateASID(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateASID(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateASID(cmd.data[0], cmd.data[1]);
        walkCache.invalidateASID(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAAL:
        // Like CMD_TLBI_VAA below but leaves the walk cache untouched
        // (leaf-only variant, judging by the command naming - the 'L'
        // suffix; see the SMMUv3 spec).
        DPRINTF(SMMUv3, "CMD_TLBI_VAAL va=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateVAA(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAA:
        // Invalidate by VA for any ASID, including walk cache entries.
        DPRINTF(SMMUv3, "CMD_TLBI_VAA va=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateVAA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVAA(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAL:
        // Like CMD_TLBI_VA below but leaves the walk cache untouched.
        DPRINTF(SMMUv3, "CMD_TLBI_VAL va=%#08x asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1], cmd.data[2]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
            slave_interface->mainTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
        }
        tlb.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        break;

    case CMD_TLBI_VA:
        // Invalidate by VA + ASID + VMID, including walk cache entries.
        DPRINTF(SMMUv3, "CMD_TLBI_VA va=%#08x asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1], cmd.data[2]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
            slave_interface->mainTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
        }
        tlb.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        walkCache.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        break;

    case CMD_TLBI_VM_IPAL:
        DPRINTF(SMMUv3, "CMD_TLBI_VM_IPAL ipa=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVMID(cmd.data[1]);
        break;

    case CMD_TLBI_VM_IPA:
        DPRINTF(SMMUv3, "CMD_TLBI_VM_IPA ipa=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVMID(cmd.data[1]);
        break;

    case CMD_TLBI_VM_S12:
        // Invalidate all Stage1 + Stage2 state for a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_VM_S12 vmid=%#x\n", cmd.data[0]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVMID(cmd.data[0]);
            slave_interface->mainTLB->invalidateVMID(cmd.data[0]);
        }
        tlb.invalidateVMID(cmd.data[0]);
        ipaCache.invalidateVMID(cmd.data[0]);
        walkCache.invalidateVMID(cmd.data[0]);
        break;

    case CMD_RESUME_S:
        DPRINTF(SMMUv3, "CMD_RESUME_S\n");
        panic("resume unimplemented");
        break;

    default:
        // Unknown/unimplemented commands are reported but not fatal.
        warn("Unimplemented command %#x\n", cmd.type);
        break;
    }
}
529
530const PageTableOps*
531SMMUv3::getPageTableOps(uint8_t trans_granule)
532{
533 static V8PageTableOps4k ptOps4k;
1/*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Stan Czerniawski
38 */
39
40#include "dev/arm/smmu_v3.hh"
41
42#include <cstddef>
43#include <cstring>
44
45#include "base/bitfield.hh"
46#include "base/cast.hh"
47#include "base/logging.hh"
48#include "base/trace.hh"
49#include "base/types.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/SMMUv3.hh"
52#include "dev/arm/smmu_v3_transl.hh"
53#include "mem/packet_access.hh"
54#include "sim/system.hh"
55
// Construct the SMMUv3 model. All cache geometries, semaphore slot
// counts and pipeline latencies come from the Python-side parameter
// object; the dedicated table-walk port starts disabled.
SMMUv3::SMMUv3(SMMUv3Params *params) :
    MemObject(params),
    system(*params->system),
    masterId(params->system->getMasterId(this)),
    masterPort(name() + ".master", *this),
    masterTableWalkPort(name() + ".master_walker", *this),
    controlPort(name() + ".control", *this, params->reg_map),
    tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
    configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
    ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy),
    // Walk cache sizing: one entry count per stage (S1/S2) and
    // translation table level (L0..L3).
    walkCache({ { params->walk_S1L0, params->walk_S1L1,
                  params->walk_S1L2, params->walk_S1L3,
                  params->walk_S2L0, params->walk_S2L1,
                  params->walk_S2L2, params->walk_S2L3 } },
              params->walk_assoc, params->walk_policy),
    tlbEnable(params->tlb_enable),
    configCacheEnable(params->cfg_enable),
    ipaCacheEnable(params->ipa_enable),
    walkCacheEnable(params->walk_enable),
    tableWalkPortEnable(false),
    walkCacheNonfinalEnable(params->wc_nonfinal_enable),
    walkCacheS1Levels(params->wc_s1_levels),
    walkCacheS2Levels(params->wc_s2_levels),
    masterPortWidth(params->master_port_width),
    tlbSem(params->tlb_slots),
    ifcSmmuSem(1),
    smmuIfcSem(1),
    configSem(params->cfg_slots),
    ipaSem(params->ipa_slots),
    walkSem(params->walk_slots),
    masterPortSem(1),
    transSem(params->xlate_slots),
    ptwSem(params->ptw_slots),
    cycleSem(1),
    tlbLat(params->tlb_lat),
    ifcSmmuLat(params->ifc_smmu_lat),
    smmuIfcLat(params->smmu_ifc_lat),
    configLat(params->cfg_lat),
    ipaLat(params->ipa_lat),
    walkLat(params->walk_lat),
    slaveInterfaces(params->slave_interfaces),
    commandExecutor(name() + ".cmd_exec", *this),
    regsMap(params->reg_map),
    processCommandsEvent(this)
{
    // The memory-mapped register region must exactly match the size of
    // the modelled SMMU register file.
    fatal_if(regsMap.size() != SMMU_REG_SIZE,
        "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
        regsMap.size(), SMMU_REG_SIZE);

    // Init smmu registers to 0
    memset(&regs, 0, sizeof(regs));

    // Setup RO ID registers
    regs.idr0 = params->smmu_idr0;
    regs.idr1 = params->smmu_idr1;
    regs.idr2 = params->smmu_idr2;
    regs.idr3 = params->smmu_idr3;
    regs.idr4 = params->smmu_idr4;
    regs.idr5 = params->smmu_idr5;
    regs.iidr = params->smmu_iidr;
    regs.aidr = params->smmu_aidr;

    // TODO: At the moment it is possible to set the ID registers to hold
    // any value. It would be nice to have a sanity check here at
    // construction time in case some ID registers are programmed to
    // store disallowed values or if there are configuration conflicts.
    warn("SMMUv3 IDx register values unchecked\n");

    // Give every slave (device-side) interface a back-pointer to us.
    for (auto ifc : slaveInterfaces)
        ifc->setSMMU(this);
}
127
128bool
129SMMUv3::masterRecvTimingResp(PacketPtr pkt)
130{
131 DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
132 pkt->getAddr(), pkt->getSize());
133
134 // @todo: We need to pay for this and not just zero it out
135 pkt->headerDelay = pkt->payloadDelay = 0;
136
137 SMMUProcess *proc =
138 safe_cast<SMMUProcess *>(pkt->popSenderState());
139
140 runProcessTiming(proc, pkt);
141
142 return true;
143}
144
145void
146SMMUv3::masterRecvReqRetry()
147{
148 assert(!packetsToRetry.empty());
149
150 while (!packetsToRetry.empty()) {
151 SMMUAction a = packetsToRetry.front();
152
153 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
154
155 DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
156 a.pkt->getAddr(), a.pkt->getSize());
157
158 if (!masterPort.sendTimingReq(a.pkt))
159 break;
160
161 packetsToRetry.pop();
162
163 /*
164 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
165 * on the master interface; this means that we no longer hold on to
166 * that transaction and therefore can accept a new one.
167 * If the slave port was stalled then unstall it (send retry).
168 */
169 if (a.type == ACTION_SEND_REQ_FINAL)
170 scheduleSlaveRetries();
171 }
172}
173
174bool
175SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
176{
177 DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
178 pkt->getAddr(), pkt->getSize());
179
180 // @todo: We need to pay for this and not just zero it out
181 pkt->headerDelay = pkt->payloadDelay = 0;
182
183 SMMUProcess *proc =
184 safe_cast<SMMUProcess *>(pkt->popSenderState());
185
186 runProcessTiming(proc, pkt);
187
188 return true;
189}
190
191void
192SMMUv3::masterTableWalkRecvReqRetry()
193{
194 assert(tableWalkPortEnable);
195 assert(!packetsTableWalkToRetry.empty());
196
197 while (!packetsTableWalkToRetry.empty()) {
198 SMMUAction a = packetsTableWalkToRetry.front();
199
200 assert(a.type==ACTION_SEND_REQ);
201
202 DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
203 a.pkt->getAddr(), a.pkt->getSize());
204
205 if (!masterTableWalkPort.sendTimingReq(a.pkt))
206 break;
207
208 packetsTableWalkToRetry.pop();
209 }
210}
211
212void
213SMMUv3::scheduleSlaveRetries()
214{
215 for (auto ifc : slaveInterfaces) {
216 ifc->scheduleDeviceRetry();
217 }
218}
219
220SMMUAction
221SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt)
222{
223 if (system.isAtomicMode()) {
224 return runProcessAtomic(proc, pkt);
225 } else if (system.isTimingMode()) {
226 return runProcessTiming(proc, pkt);
227 } else {
228 panic("Not in timing or atomic mode!");
229 }
230}
231
/**
 * Run a process to completion in atomic mode, accumulating the latency
 * of every memory request and explicit delay it generates. The process
 * is stepped repeatedly until it produces a terminating action
 * (a response or a sleep).
 */
SMMUAction
SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action;
    Tick delay = 0;
    bool finished = false;

    do {
        action = proc->run(pkt);

        switch (action.type) {
        case ACTION_SEND_REQ:
            // Send an MMU initiated request on the table walk port if it is
            // enabled. Otherwise, fall through and handle same as the final
            // ACTION_SEND_REQ_FINAL request.
            if (tableWalkPortEnable) {
                delay += masterTableWalkPort.sendAtomic(action.pkt);
                // Feed the completed packet back into the process on
                // the next loop iteration.
                pkt = action.pkt;
                break;
            }
            M5_FALLTHROUGH;
        case ACTION_SEND_REQ_FINAL:
            delay += masterPort.sendAtomic(action.pkt);
            pkt = action.pkt;
            break;

        case ACTION_SEND_RESP:
        case ACTION_SEND_RESP_ATS:
        case ACTION_SLEEP:
            // The process produced its final result (or went to
            // sleep); stop stepping it.
            finished = true;
            break;

        case ACTION_DELAY:
            // Pure modelled latency with no memory access attached.
            delay += action.delay;
            break;

        case ACTION_TERMINATE:
            panic("ACTION_TERMINATE in atomic mode\n");

        default:
            panic("Unknown action\n");
        }
    } while (!finished);

    // Report the total accumulated latency to the caller.
    action.delay = delay;

    return action;
}
280
/**
 * Advance a process by one step in timing mode and carry out the single
 * action it returns. Requests are sent on the appropriate master port
 * (or queued for a later retry); responses are scheduled on the slave
 * interface and the process is destroyed once it has produced its
 * final result.
 */
SMMUAction
SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
{
    SMMUAction action = proc->run(pkt);

    switch (action.type) {
    case ACTION_SEND_REQ:
        // Send an MMU initiated request on the table walk port if it is
        // enabled. Otherwise, fall through and handle same as the final
        // ACTION_SEND_REQ_FINAL request.
        if (tableWalkPortEnable) {
            // Stash the process in the packet so the response handler
            // can resume it later.
            action.pkt->pushSenderState(proc);

            DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n",
                    action.pkt->getAddr(), action.pkt->getSize());

            // Only send directly if nothing is already queued for
            // retry; otherwise requests could be reordered.
            if (packetsTableWalkToRetry.empty()
                && masterTableWalkPort.sendTimingReq(action.pkt)) {
                scheduleSlaveRetries();
            } else {
                DPRINTF(SMMUv3, "[t] master HWTW req needs retry,"
                        " qlen=%d\n", packetsTableWalkToRetry.size());
                packetsTableWalkToRetry.push(action);
            }

            break;
        }
        M5_FALLTHROUGH;
    case ACTION_SEND_REQ_FINAL:
        action.pkt->pushSenderState(proc);

        DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        // Preserve ordering: send only if the retry queue is empty and
        // the downstream port accepts the packet.
        if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) {
            scheduleSlaveRetries();
        } else {
            DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n",
                    packetsToRetry.size());
            packetsToRetry.push(action);
        }

        break;

    case ACTION_SEND_RESP:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(),
                action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedTimingResp(action.pkt);

        // The translation is complete; the process is no longer needed.
        delete proc;
        break;

    case ACTION_SEND_RESP_ATS:
        // @todo: We need to pay for this and not just zero it out
        action.pkt->headerDelay = action.pkt->payloadDelay = 0;

        DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
                action.pkt->getAddr(), action.pkt->getSize());

        assert(action.ifc);
        action.ifc->schedAtsTimingResp(action.pkt);

        delete proc;
        break;

    case ACTION_DELAY:
    case ACTION_SLEEP:
        // Nothing to do: the process either rescheduled itself (delay)
        // or waits to be woken up (sleep).
        break;

    case ACTION_TERMINATE:
        delete proc;
        break;

    default:
        panic("Unknown action\n");
    }

    return action;
}
366
367void
368SMMUv3::processCommands()
369{
370 DPRINTF(SMMUv3, "processCommands()\n");
371
372 if (system.isAtomicMode()) {
373 SMMUAction a = runProcessAtomic(&commandExecutor, NULL);
374 (void) a;
375 } else if (system.isTimingMode()) {
376 if (!commandExecutor.isBusy())
377 runProcessTiming(&commandExecutor, NULL);
378 } else {
379 panic("Not in timing or atomic mode!");
380 }
381}
382
/**
 * Execute a single SMMU command. Invalidation commands flush the
 * matching entries from the per-interface micro/main TLBs and from the
 * shared TLB, configuration, IPA and walk caches.
 *
 * cmd.data[] carries the command operands (SID/SSID or VA/ASID/VMID,
 * depending on the command type).
 */
void
SMMUv3::processCommand(const SMMUCommand &cmd)
{
    switch (cmd.type) {
    case CMD_PRF_CONFIG:
        // Prefetch hints are not modelled.
        DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
        break;

    case CMD_PRF_ADDR:
        DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
        break;

    case CMD_INV_STE:
        // Invalidate the cached Stream Table Entry for one stream ID.
        DPRINTF(SMMUv3, "CMD_INV_STE sid=%#x\n", cmd.data[0]);
        configCache.invalidateSID(cmd.data[0]);
        break;

    case CMD_INV_CD:
        // Invalidate one Context Descriptor (SID + SSID pair).
        DPRINTF(SMMUv3, "CMD_INV_CD sid=%#x ssid=%#x\n",
                cmd.data[0], cmd.data[1]);
        configCache.invalidateSSID(cmd.data[0], cmd.data[1]);
        break;

    case CMD_INV_CD_ALL:
        // Invalidate every Context Descriptor under a stream ID.
        DPRINTF(SMMUv3, "CMD_INV_CD_ALL sid=%#x\n", cmd.data[0]);
        configCache.invalidateSID(cmd.data[0]);
        break;

    case CMD_INV_ALL:
        DPRINTF(SMMUv3, "CMD_INV_ALL\n");
        configCache.invalidateAll();
        break;

    case CMD_TLBI_ALL:
        // Flush every translation structure in the model.
        DPRINTF(SMMUv3, "CMD_TLBI_ALL\n");
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateAll();
            slave_interface->mainTLB->invalidateAll();
        }
        tlb.invalidateAll();
        ipaCache.invalidateAll();
        walkCache.invalidateAll();
        break;

    case CMD_TLBI_ASID:
        // Invalidate by ASID (data[0]) within a VMID (data[1]).
        DPRINTF(SMMUv3, "CMD_TLBI_ASID asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateASID(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateASID(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateASID(cmd.data[0], cmd.data[1]);
        walkCache.invalidateASID(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAAL:
        // Like CMD_TLBI_VAA below but leaves the walk cache untouched
        // (leaf-only variant, judging by the command naming - the 'L'
        // suffix; see the SMMUv3 spec).
        DPRINTF(SMMUv3, "CMD_TLBI_VAAL va=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateVAA(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAA:
        // Invalidate by VA for any ASID, including walk cache entries.
        DPRINTF(SMMUv3, "CMD_TLBI_VAA va=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
            slave_interface->mainTLB->invalidateVAA(
                cmd.data[0], cmd.data[1]);
        }
        tlb.invalidateVAA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVAA(cmd.data[0], cmd.data[1]);
        break;

    case CMD_TLBI_VAL:
        // Like CMD_TLBI_VA below but leaves the walk cache untouched.
        DPRINTF(SMMUv3, "CMD_TLBI_VAL va=%#08x asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1], cmd.data[2]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
            slave_interface->mainTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
        }
        tlb.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        break;

    case CMD_TLBI_VA:
        // Invalidate by VA + ASID + VMID, including walk cache entries.
        DPRINTF(SMMUv3, "CMD_TLBI_VA va=%#08x asid=%#x vmid=%#x\n",
                cmd.data[0], cmd.data[1], cmd.data[2]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
            slave_interface->mainTLB->invalidateVA(
                cmd.data[0], cmd.data[1], cmd.data[2]);
        }
        tlb.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        walkCache.invalidateVA(cmd.data[0], cmd.data[1], cmd.data[2]);
        break;

    case CMD_TLBI_VM_IPAL:
        DPRINTF(SMMUv3, "CMD_TLBI_VM_IPAL ipa=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVMID(cmd.data[1]);
        break;

    case CMD_TLBI_VM_IPA:
        DPRINTF(SMMUv3, "CMD_TLBI_VM_IPA ipa=%#08x vmid=%#x\n",
                cmd.data[0], cmd.data[1]);
        // This does not invalidate TLBs containing
        // combined Stage1 + Stage2 translations, as per the spec.
        ipaCache.invalidateIPA(cmd.data[0], cmd.data[1]);
        walkCache.invalidateVMID(cmd.data[1]);
        break;

    case CMD_TLBI_VM_S12:
        // Invalidate all Stage1 + Stage2 state for a VMID.
        DPRINTF(SMMUv3, "CMD_TLBI_VM_S12 vmid=%#x\n", cmd.data[0]);
        for (auto slave_interface : slaveInterfaces) {
            slave_interface->microTLB->invalidateVMID(cmd.data[0]);
            slave_interface->mainTLB->invalidateVMID(cmd.data[0]);
        }
        tlb.invalidateVMID(cmd.data[0]);
        ipaCache.invalidateVMID(cmd.data[0]);
        walkCache.invalidateVMID(cmd.data[0]);
        break;

    case CMD_RESUME_S:
        DPRINTF(SMMUv3, "CMD_RESUME_S\n");
        panic("resume unimplemented");
        break;

    default:
        // Unknown/unimplemented commands are reported but not fatal.
        warn("Unimplemented command %#x\n", cmd.type);
        break;
    }
}
529
530const PageTableOps*
531SMMUv3::getPageTableOps(uint8_t trans_granule)
532{
533 static V8PageTableOps4k ptOps4k;
534 static V8PageTableOps16k ptOps16k;
534 static V8PageTableOps64k ptOps64k;
535
536 switch (trans_granule) {
537 case TRANS_GRANULE_4K: return &ptOps4k;
535 static V8PageTableOps64k ptOps64k;
536
537 switch (trans_granule) {
538 case TRANS_GRANULE_4K: return &ptOps4k;
539 case TRANS_GRANULE_16K: return &ptOps16k;
538 case TRANS_GRANULE_64K: return &ptOps64k;
539 default:
540 panic("Unknown translation granule size %d", trans_granule);
541 }
542}
543
544Tick
545SMMUv3::readControl(PacketPtr pkt)
546{
547 DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n",
548 pkt->getAddr(), pkt->getSize());
549
550 int offset = pkt->getAddr() - regsMap.start();
551 assert(offset >= 0 && offset < SMMU_REG_SIZE);
552
553 if (inSecureBlock(offset)) {
554 warn("smmu: secure registers (0x%x) are not implemented\n",
555 offset);
556 }
557
558 auto reg_ptr = regs.data + offset;
559
560 switch (pkt->getSize()) {
561 case sizeof(uint32_t):
562 pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
563 break;
564 case sizeof(uint64_t):
565 pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
566 break;
567 default:
568 panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
569 break;
570 }
571
572 pkt->makeAtomicResponse();
573
574 return 0;
575}
576
577Tick
578SMMUv3::writeControl(PacketPtr pkt)
579{
580 int offset = pkt->getAddr() - regsMap.start();
581 assert(offset >= 0 && offset < SMMU_REG_SIZE);
582
583 DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
584 pkt->getAddr(), pkt->getSize(),
585 pkt->getSize() == sizeof(uint64_t) ?
586 pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());
587
588 switch (offset) {
589 case offsetof(SMMURegs, cr0):
590 assert(pkt->getSize() == sizeof(uint32_t));
591 regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
592 break;
593
594 case offsetof(SMMURegs, cr1):
595 case offsetof(SMMURegs, cr2):
596 case offsetof(SMMURegs, strtab_base_cfg):
597 case offsetof(SMMURegs, eventq_cons):
598 case offsetof(SMMURegs, eventq_irq_cfg1):
599 case offsetof(SMMURegs, priq_cons):
600 assert(pkt->getSize() == sizeof(uint32_t));
601 *reinterpret_cast<uint32_t *>(regs.data + offset) =
602 pkt->getLE<uint32_t>();
603 break;
604
605 case offsetof(SMMURegs, cmdq_prod):
606 assert(pkt->getSize() == sizeof(uint32_t));
607 *reinterpret_cast<uint32_t *>(regs.data + offset) =
608 pkt->getLE<uint32_t>();
609 schedule(processCommandsEvent, nextCycle());
610 break;
611
612 case offsetof(SMMURegs, strtab_base):
613 case offsetof(SMMURegs, eventq_irq_cfg0):
614 assert(pkt->getSize() == sizeof(uint64_t));
615 *reinterpret_cast<uint64_t *>(regs.data + offset) =
616 pkt->getLE<uint64_t>();
617 break;
618
619 case offsetof(SMMURegs, cmdq_base):
620 assert(pkt->getSize() == sizeof(uint64_t));
621 *reinterpret_cast<uint64_t *>(regs.data + offset) =
622 pkt->getLE<uint64_t>();
623 regs.cmdq_cons = 0;
624 regs.cmdq_prod = 0;
625 break;
626
627
628 case offsetof(SMMURegs, eventq_base):
629 assert(pkt->getSize() == sizeof(uint64_t));
630 *reinterpret_cast<uint64_t *>(regs.data + offset) =
631 pkt->getLE<uint64_t>();
632 regs.eventq_cons = 0;
633 regs.eventq_prod = 0;
634 break;
635
636 case offsetof(SMMURegs, priq_base):
637 assert(pkt->getSize() == sizeof(uint64_t));
638 *reinterpret_cast<uint64_t *>(regs.data + offset) =
639 pkt->getLE<uint64_t>();
640 regs.priq_cons = 0;
641 regs.priq_prod = 0;
642 break;
643
644 default:
645 if (inSecureBlock(offset)) {
646 warn("smmu: secure registers (0x%x) are not implemented\n",
647 offset);
648 } else {
649 warn("smmu: write to read-only/undefined register at 0x%x\n",
650 offset);
651 }
652 }
653
654 pkt->makeAtomicResponse();
655
656 return 0;
657}
658
659bool
660SMMUv3::inSecureBlock(uint32_t offs) const
661{
662 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ)
663 return true;
664 else
665 return false;
666}
667
668void
669SMMUv3::init()
670{
671 // make sure both sides are connected and have the same block size
672 if (!masterPort.isConnected())
673 fatal("Master port is not connected.\n");
674
675 // If the second master port is connected for the table walks, enable
676 // the mode to send table walks through this port instead
677 if (masterTableWalkPort.isConnected())
678 tableWalkPortEnable = true;
679
680 // notify the master side of our address ranges
681 for (auto ifc : slaveInterfaces) {
682 ifc->sendRange();
683 }
684
685 if (controlPort.isConnected())
686 controlPort.sendRangeChange();
687}
688
689void
690SMMUv3::regStats()
691{
692 MemObject::regStats();
693
694 using namespace Stats;
695
696 for (size_t i = 0; i < slaveInterfaces.size(); i++) {
697 slaveInterfaces[i]->microTLB->regStats(
698 csprintf("%s.utlb%d", name(), i));
699 slaveInterfaces[i]->mainTLB->regStats(
700 csprintf("%s.maintlb%d", name(), i));
701 }
702
703 tlb.regStats(name() + ".tlb");
704 configCache.regStats(name() + ".cfg");
705 ipaCache.regStats(name() + ".ipa");
706 walkCache.regStats(name() + ".walk");
707
708 steL1Fetches
709 .name(name() + ".steL1Fetches")
710 .desc("STE L1 fetches")
711 .flags(pdf);
712
713 steFetches
714 .name(name() + ".steFetches")
715 .desc("STE fetches")
716 .flags(pdf);
717
718 cdL1Fetches
719 .name(name() + ".cdL1Fetches")
720 .desc("CD L1 fetches")
721 .flags(pdf);
722
723 cdFetches
724 .name(name() + ".cdFetches")
725 .desc("CD fetches")
726 .flags(pdf);
727
728 translationTimeDist
729 .init(0, 2000000, 2000)
730 .name(name() + ".translationTimeDist")
731 .desc("Time to translate address")
732 .flags(pdf);
733
734 ptwTimeDist
735 .init(0, 2000000, 2000)
736 .name(name() + ".ptwTimeDist")
737 .desc("Time to walk page tables")
738 .flags(pdf);
739}
740
741DrainState
742SMMUv3::drain()
743{
744 // Wait until the Command Executor is not busy
745 if (commandExecutor.isBusy()) {
746 return DrainState::Draining;
747 }
748 return DrainState::Drained;
749}
750
751void
752SMMUv3::serialize(CheckpointOut &cp) const
753{
754 DPRINTF(Checkpoint, "Serializing SMMUv3\n");
755
756 SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
757}
758
759void
760SMMUv3::unserialize(CheckpointIn &cp)
761{
762 DPRINTF(Checkpoint, "Unserializing SMMUv3\n");
763
764 UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
765}
766
767Port&
768SMMUv3::getPort(const std::string &name, PortID id)
769{
770 if (name == "master") {
771 return masterPort;
772 } else if (name == "master_walker") {
773 return masterTableWalkPort;
774 } else if (name == "control") {
775 return controlPort;
776 } else {
777 return MemObject::getPort(name, id);
778 }
779}
780
781SMMUv3*
782SMMUv3Params::create()
783{
784 return new SMMUv3(this);
785}
540 case TRANS_GRANULE_64K: return &ptOps64k;
541 default:
542 panic("Unknown translation granule size %d", trans_granule);
543 }
544}
545
546Tick
547SMMUv3::readControl(PacketPtr pkt)
548{
549 DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n",
550 pkt->getAddr(), pkt->getSize());
551
552 int offset = pkt->getAddr() - regsMap.start();
553 assert(offset >= 0 && offset < SMMU_REG_SIZE);
554
555 if (inSecureBlock(offset)) {
556 warn("smmu: secure registers (0x%x) are not implemented\n",
557 offset);
558 }
559
560 auto reg_ptr = regs.data + offset;
561
562 switch (pkt->getSize()) {
563 case sizeof(uint32_t):
564 pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
565 break;
566 case sizeof(uint64_t):
567 pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
568 break;
569 default:
570 panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
571 break;
572 }
573
574 pkt->makeAtomicResponse();
575
576 return 0;
577}
578
579Tick
580SMMUv3::writeControl(PacketPtr pkt)
581{
582 int offset = pkt->getAddr() - regsMap.start();
583 assert(offset >= 0 && offset < SMMU_REG_SIZE);
584
585 DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
586 pkt->getAddr(), pkt->getSize(),
587 pkt->getSize() == sizeof(uint64_t) ?
588 pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());
589
590 switch (offset) {
591 case offsetof(SMMURegs, cr0):
592 assert(pkt->getSize() == sizeof(uint32_t));
593 regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
594 break;
595
596 case offsetof(SMMURegs, cr1):
597 case offsetof(SMMURegs, cr2):
598 case offsetof(SMMURegs, strtab_base_cfg):
599 case offsetof(SMMURegs, eventq_cons):
600 case offsetof(SMMURegs, eventq_irq_cfg1):
601 case offsetof(SMMURegs, priq_cons):
602 assert(pkt->getSize() == sizeof(uint32_t));
603 *reinterpret_cast<uint32_t *>(regs.data + offset) =
604 pkt->getLE<uint32_t>();
605 break;
606
607 case offsetof(SMMURegs, cmdq_prod):
608 assert(pkt->getSize() == sizeof(uint32_t));
609 *reinterpret_cast<uint32_t *>(regs.data + offset) =
610 pkt->getLE<uint32_t>();
611 schedule(processCommandsEvent, nextCycle());
612 break;
613
614 case offsetof(SMMURegs, strtab_base):
615 case offsetof(SMMURegs, eventq_irq_cfg0):
616 assert(pkt->getSize() == sizeof(uint64_t));
617 *reinterpret_cast<uint64_t *>(regs.data + offset) =
618 pkt->getLE<uint64_t>();
619 break;
620
621 case offsetof(SMMURegs, cmdq_base):
622 assert(pkt->getSize() == sizeof(uint64_t));
623 *reinterpret_cast<uint64_t *>(regs.data + offset) =
624 pkt->getLE<uint64_t>();
625 regs.cmdq_cons = 0;
626 regs.cmdq_prod = 0;
627 break;
628
629
630 case offsetof(SMMURegs, eventq_base):
631 assert(pkt->getSize() == sizeof(uint64_t));
632 *reinterpret_cast<uint64_t *>(regs.data + offset) =
633 pkt->getLE<uint64_t>();
634 regs.eventq_cons = 0;
635 regs.eventq_prod = 0;
636 break;
637
638 case offsetof(SMMURegs, priq_base):
639 assert(pkt->getSize() == sizeof(uint64_t));
640 *reinterpret_cast<uint64_t *>(regs.data + offset) =
641 pkt->getLE<uint64_t>();
642 regs.priq_cons = 0;
643 regs.priq_prod = 0;
644 break;
645
646 default:
647 if (inSecureBlock(offset)) {
648 warn("smmu: secure registers (0x%x) are not implemented\n",
649 offset);
650 } else {
651 warn("smmu: write to read-only/undefined register at 0x%x\n",
652 offset);
653 }
654 }
655
656 pkt->makeAtomicResponse();
657
658 return 0;
659}
660
661bool
662SMMUv3::inSecureBlock(uint32_t offs) const
663{
664 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ)
665 return true;
666 else
667 return false;
668}
669
670void
671SMMUv3::init()
672{
673 // make sure both sides are connected and have the same block size
674 if (!masterPort.isConnected())
675 fatal("Master port is not connected.\n");
676
677 // If the second master port is connected for the table walks, enable
678 // the mode to send table walks through this port instead
679 if (masterTableWalkPort.isConnected())
680 tableWalkPortEnable = true;
681
682 // notify the master side of our address ranges
683 for (auto ifc : slaveInterfaces) {
684 ifc->sendRange();
685 }
686
687 if (controlPort.isConnected())
688 controlPort.sendRangeChange();
689}
690
691void
692SMMUv3::regStats()
693{
694 MemObject::regStats();
695
696 using namespace Stats;
697
698 for (size_t i = 0; i < slaveInterfaces.size(); i++) {
699 slaveInterfaces[i]->microTLB->regStats(
700 csprintf("%s.utlb%d", name(), i));
701 slaveInterfaces[i]->mainTLB->regStats(
702 csprintf("%s.maintlb%d", name(), i));
703 }
704
705 tlb.regStats(name() + ".tlb");
706 configCache.regStats(name() + ".cfg");
707 ipaCache.regStats(name() + ".ipa");
708 walkCache.regStats(name() + ".walk");
709
710 steL1Fetches
711 .name(name() + ".steL1Fetches")
712 .desc("STE L1 fetches")
713 .flags(pdf);
714
715 steFetches
716 .name(name() + ".steFetches")
717 .desc("STE fetches")
718 .flags(pdf);
719
720 cdL1Fetches
721 .name(name() + ".cdL1Fetches")
722 .desc("CD L1 fetches")
723 .flags(pdf);
724
725 cdFetches
726 .name(name() + ".cdFetches")
727 .desc("CD fetches")
728 .flags(pdf);
729
730 translationTimeDist
731 .init(0, 2000000, 2000)
732 .name(name() + ".translationTimeDist")
733 .desc("Time to translate address")
734 .flags(pdf);
735
736 ptwTimeDist
737 .init(0, 2000000, 2000)
738 .name(name() + ".ptwTimeDist")
739 .desc("Time to walk page tables")
740 .flags(pdf);
741}
742
743DrainState
744SMMUv3::drain()
745{
746 // Wait until the Command Executor is not busy
747 if (commandExecutor.isBusy()) {
748 return DrainState::Draining;
749 }
750 return DrainState::Drained;
751}
752
753void
754SMMUv3::serialize(CheckpointOut &cp) const
755{
756 DPRINTF(Checkpoint, "Serializing SMMUv3\n");
757
758 SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
759}
760
761void
762SMMUv3::unserialize(CheckpointIn &cp)
763{
764 DPRINTF(Checkpoint, "Unserializing SMMUv3\n");
765
766 UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
767}
768
769Port&
770SMMUv3::getPort(const std::string &name, PortID id)
771{
772 if (name == "master") {
773 return masterPort;
774 } else if (name == "master_walker") {
775 return masterTableWalkPort;
776 } else if (name == "control") {
777 return controlPort;
778 } else {
779 return MemObject::getPort(name, id);
780 }
781}
782
783SMMUv3*
784SMMUv3Params::create()
785{
786 return new SMMUv3(this);
787}