smmu_v3_transl.cc (14065:f925f90bda01) smmu_v3_transl.cc (14100:6ef1220dc6da)
1/*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Stan Czerniawski
38 */
39
40#include "dev/arm/smmu_v3_transl.hh"
41
42#include "debug/SMMUv3.hh"
43#include "debug/SMMUv3Hazard.hh"
44#include "dev/arm/amba.hh"
45#include "dev/arm/smmu_v3.hh"
46#include "sim/system.hh"
47
48SMMUTranslRequest
49SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
50{
51 SMMUTranslRequest req;
52 req.addr = pkt->getAddr();
53 req.size = pkt->getSize();
54 req.sid = pkt->req->streamId();
55 req.ssid = pkt->req->hasSubstreamId() ?
56 pkt->req->substreamId() : 0;
57 req.isWrite = pkt->isWrite();
58 req.isPrefetch = false;
59 req.isAtsRequest = ats;
60 req.pkt = pkt;
61
62 return req;
63}
64
65SMMUTranslRequest
66SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
67{
68 SMMUTranslRequest req;
69 req.addr = addr;
70 req.size = 0;
71 req.sid = sid;
72 req.ssid = ssid;
73 req.isWrite = false;
74 req.isPrefetch = true;
75 req.isAtsRequest = false;
76 req.pkt = NULL;
77
78 return req;
79}
80
81SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
82 SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
83 :
84 SMMUProcess(name, _smmu),
85 ifc(_ifc)
86{
87 // Decrease number of pending translation slots on the slave interface
88 assert(ifc.xlateSlotsRemaining > 0);
89 ifc.xlateSlotsRemaining--;
90 reinit();
91}
92
93SMMUTranslationProcess::~SMMUTranslationProcess()
94{
95 // Increase number of pending translation slots on the slave interface
96 ifc.xlateSlotsRemaining++;
97 // If no more SMMU translations are pending (all slots available),
98 // signal SMMU Slave Interface as drained
99 if (ifc.xlateSlotsRemaining == ifc.params()->xlate_slots) {
100 ifc.signalDrainDone();
101 }
102}
103
104void
105SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
106{
107 request = req;
108
109 reinit();
110}
111
112void
113SMMUTranslationProcess::resumeTransaction()
114{
115 assert(smmu.system.isTimingMode());
116
117 assert(!"Stalls are broken");
118
119 Tick resumeTick = curTick();
120
121 (void) resumeTick;
122 DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
123 resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
124
125 beginTransaction(request);
126
127 smmu.runProcessTiming(this, request.pkt);
128}
129
130void
131SMMUTranslationProcess::main(Yield &yield)
132{
133 // Hack:
134 // The coroutine starts running as soon as it's created.
135 // But we need to wait for request data esp. in atomic mode.
136 SMMUAction a;
137 a.type = ACTION_INITIAL_NOP;
138 a.pkt = NULL;
139 yield(a);
140
141 const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
142
143 if ((request.addr + request.size) > next4k)
144 panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
145 request.addr, request.size);
146
147
148 unsigned numSlaveBeats = request.isWrite ?
149 (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
150
151 doSemaphoreDown(yield, ifc.slavePortSem);
152 doDelay(yield, Cycles(numSlaveBeats));
153 doSemaphoreUp(ifc.slavePortSem);
154
155
156 recvTick = curTick();
157
158
159 if (!(smmu.regs.cr0 & 0x1)) {
160 // SMMU disabled
161 doDelay(yield, Cycles(1));
162 completeTransaction(yield, bypass(request.addr));
163 return;
164 }
165
166 TranslResult tr;
167 bool wasPrefetched = false;
168
169 if (request.isPrefetch) {
170 // Abort prefetch if:
171 // - there's already a transaction looking up the same 4k page, OR
172 // - requested address is already in the TLB.
173 if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
174 completePrefetch(yield); // this never returns
175
176 hazard4kRegister();
177
178 tr = smmuTranslation(yield);
179
180 if (tr.fault == FAULT_NONE)
181 ifcTLBUpdate(yield, tr);
182
183 hazard4kRelease();
184
185 completePrefetch(yield);
186 } else {
187 hazardIdRegister();
188
189 if (!microTLBLookup(yield, tr)) {
190 bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
191 if (!hit) {
192 while (!hit && hazard4kCheck()) {
193 hazard4kHold(yield);
194 hit = ifcTLBLookup(yield, tr, wasPrefetched);
195 }
196 }
197
198 // Issue prefetch if:
199 // - there was a TLB hit and the entry was prefetched, OR
200 // - TLB miss was successfully serviced
201 if (hit) {
202 if (wasPrefetched)
203 issuePrefetch(next4k);
204 } else {
205 hazard4kRegister();
206
207 tr = smmuTranslation(yield);
208
209 if (tr.fault == FAULT_NONE) {
210 ifcTLBUpdate(yield, tr);
211
212 issuePrefetch(next4k);
213 }
214
215 hazard4kRelease();
216 }
217
218 if (tr.fault == FAULT_NONE)
219 microTLBUpdate(yield, tr);
220 }
221
222 hazardIdHold(yield);
223 hazardIdRelease();
224
225 if (tr.fault != FAULT_NONE)
226 panic("fault\n");
227
228 completeTransaction(yield, tr);
229 }
230}
231
232SMMUTranslationProcess::TranslResult
233SMMUTranslationProcess::bypass(Addr addr) const
234{
235 TranslResult tr;
236 tr.fault = FAULT_NONE;
237 tr.addr = addr;
238 tr.addrMask = 0;
239 tr.writable = 1;
240
241 return tr;
242}
243
244SMMUTranslationProcess::TranslResult
245SMMUTranslationProcess::smmuTranslation(Yield &yield)
246{
247 TranslResult tr;
248
249 // Need SMMU credit to proceed
250 doSemaphoreDown(yield, smmu.transSem);
251
252 // Simulate pipelined IFC->SMMU link
253 doSemaphoreDown(yield, smmu.ifcSmmuSem);
254 doDelay(yield, Cycles(1)); // serialize transactions
255 doSemaphoreUp(smmu.ifcSmmuSem);
256 doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
257
258 bool haveConfig = true;
259 if (!configCacheLookup(yield, context)) {
260 if(findConfig(yield, context, tr)) {
261 configCacheUpdate(yield, context);
262 } else {
263 haveConfig = false;
264 }
265 }
266
267 if (haveConfig && !smmuTLBLookup(yield, tr)) {
268 // SMMU main TLB miss
269
270 // Need PTW slot to proceed
271 doSemaphoreDown(yield, smmu.ptwSem);
272
273 // Page table walk
274 Tick ptwStartTick = curTick();
275
276 if (context.stage1Enable) {
277 tr = translateStage1And2(yield, request.addr);
278 } else if (context.stage2Enable) {
279 tr = translateStage2(yield, request.addr, true);
280 } else {
281 tr = bypass(request.addr);
282 }
283
284 if (context.stage1Enable || context.stage2Enable)
285 smmu.ptwTimeDist.sample(curTick() - ptwStartTick);
286
287 // Free PTW slot
288 doSemaphoreUp(smmu.ptwSem);
289
290 if (tr.fault == FAULT_NONE)
291 smmuTLBUpdate(yield, tr);
292 }
293
294 // Simulate pipelined SMMU->SLAVE INTERFACE link
295 doSemaphoreDown(yield, smmu.smmuIfcSem);
296 doDelay(yield, Cycles(1)); // serialize transactions
297 doSemaphoreUp(smmu.smmuIfcSem);
298 doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
299
300 // return SMMU credit
301 doSemaphoreUp(smmu.transSem);
302
303 return tr;
304}
305
306bool
307SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
308{
309 if (!ifc.microTLBEnable)
310 return false;
311
312 doSemaphoreDown(yield, ifc.microTLBSem);
313 doDelay(yield, ifc.microTLBLat);
314 const SMMUTLB::Entry *e =
315 ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
316 doSemaphoreUp(ifc.microTLBSem);
317
318 if (!e) {
319 DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
320 request.addr, request.sid, request.ssid);
321
322 return false;
323 }
324
325 DPRINTF(SMMUv3,
326 "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
327 request.addr, e->vaMask, request.sid, request.ssid, e->pa);
328
329 tr.fault = FAULT_NONE;
330 tr.addr = e->pa + (request.addr & ~e->vaMask);;
331 tr.addrMask = e->vaMask;
332 tr.writable = e->permissions;
333
334 return true;
335}
336
337bool
338SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
339 bool &wasPrefetched)
340{
341 if (!ifc.mainTLBEnable)
342 return false;
343
344 doSemaphoreDown(yield, ifc.mainTLBSem);
345 doDelay(yield, ifc.mainTLBLat);
346 const SMMUTLB::Entry *e =
347 ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
348 doSemaphoreUp(ifc.mainTLBSem);
349
350 if (!e) {
351 DPRINTF(SMMUv3,
352 "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
353 request.addr, request.sid, request.ssid);
354
355 return false;
356 }
357
358 DPRINTF(SMMUv3,
359 "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
360 "paddr=%#x\n", request.addr, e->vaMask, request.sid,
361 request.ssid, e->pa);
362
363 tr.fault = FAULT_NONE;
364 tr.addr = e->pa + (request.addr & ~e->vaMask);;
365 tr.addrMask = e->vaMask;
366 tr.writable = e->permissions;
367 wasPrefetched = e->prefetched;
368
369 return true;
370}
371
372bool
373SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
374{
375 if (!smmu.tlbEnable)
376 return false;
377
378 doSemaphoreDown(yield, smmu.tlbSem);
379 doDelay(yield, smmu.tlbLat);
380 const ARMArchTLB::Entry *e =
381 smmu.tlb.lookup(request.addr, context.asid, context.vmid);
382 doSemaphoreUp(smmu.tlbSem);
383
384 if (!e) {
385 DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
386 request.addr, context.asid, context.vmid);
387
388 return false;
389 }
390
391 DPRINTF(SMMUv3,
392 "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
393 request.addr, e->vaMask, context.asid, context.vmid, e->pa);
394
395 tr.fault = FAULT_NONE;
396 tr.addr = e->pa + (request.addr & ~e->vaMask);;
397 tr.addrMask = e->vaMask;
398 tr.writable = e->permissions;
399
400 return true;
401}
402
403void
404SMMUTranslationProcess::microTLBUpdate(Yield &yield,
405 const TranslResult &tr)
406{
407 assert(tr.fault == FAULT_NONE);
408
409 if (!ifc.microTLBEnable)
410 return;
411
412 SMMUTLB::Entry e;
413 e.valid = true;
414 e.prefetched = false;
415 e.sid = request.sid;
416 e.ssid = request.ssid;
417 e.vaMask = tr.addrMask;
418 e.va = request.addr & e.vaMask;
419 e.pa = tr.addr & e.vaMask;
420 e.permissions = tr.writable;
421 e.asid = context.asid;
422 e.vmid = context.vmid;
423
424 doSemaphoreDown(yield, ifc.microTLBSem);
425
426 DPRINTF(SMMUv3,
427 "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
428 e.va, e.vaMask, e.pa, e.sid, e.ssid);
429
430 ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);
431
432 doSemaphoreUp(ifc.microTLBSem);
433}
434
435void
436SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
437 const TranslResult &tr)
438{
439 assert(tr.fault == FAULT_NONE);
440
441 if (!ifc.mainTLBEnable)
442 return;
443
444 SMMUTLB::Entry e;
445 e.valid = true;
446 e.prefetched = request.isPrefetch;
447 e.sid = request.sid;
448 e.ssid = request.ssid;
449 e.vaMask = tr.addrMask;
450 e.va = request.addr & e.vaMask;
451 e.pa = tr.addr & e.vaMask;
452 e.permissions = tr.writable;
453 e.asid = context.asid;
454 e.vmid = context.vmid;
455
456 SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
457 if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
458 alloc = request.isPrefetch ?
459 SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;
460
461 doSemaphoreDown(yield, ifc.mainTLBSem);
462
463 DPRINTF(SMMUv3,
464 "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
465 "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
466
467 ifc.mainTLB->store(e, alloc);
468
469 doSemaphoreUp(ifc.mainTLBSem);
470}
471
472void
473SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
474 const TranslResult &tr)
475{
476 assert(tr.fault == FAULT_NONE);
477
478 if (!smmu.tlbEnable)
479 return;
480
481 ARMArchTLB::Entry e;
482 e.valid = true;
483 e.vaMask = tr.addrMask;
484 e.va = request.addr & e.vaMask;
485 e.asid = context.asid;
486 e.vmid = context.vmid;
487 e.pa = tr.addr & e.vaMask;
488 e.permissions = tr.writable;
489
490 doSemaphoreDown(yield, smmu.tlbSem);
491
492 DPRINTF(SMMUv3,
493 "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
494 e.va, e.vaMask, e.pa, e.asid, e.vmid);
495
496 smmu.tlb.store(e);
497
498 doSemaphoreUp(smmu.tlbSem);
499}
500
501bool
502SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
503{
504 if (!smmu.configCacheEnable)
505 return false;
506
507 doSemaphoreDown(yield, smmu.configSem);
508 doDelay(yield, smmu.configLat);
509 const ConfigCache::Entry *e =
510 smmu.configCache.lookup(request.sid, request.ssid);
511 doSemaphoreUp(smmu.configSem);
512
513 if (!e) {
514 DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
515 request.sid, request.ssid);
516
517 return false;
518 }
519
520 DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
521 request.sid, request.ssid, e->ttb0, e->asid);
522
523 tc.stage1Enable = e->stage1_en;
524 tc.stage2Enable = e->stage2_en;
525
526 tc.ttb0 = e->ttb0;
527 tc.ttb1 = e->ttb1;
528 tc.asid = e->asid;
529 tc.httb = e->httb;
530 tc.vmid = e->vmid;
531
532 tc.stage1TranslGranule = e->stage1_tg;
533 tc.stage2TranslGranule = e->stage2_tg;
534
535 return true;
536}
537
538void
539SMMUTranslationProcess::configCacheUpdate(Yield &yield,
540 const TranslContext &tc)
541{
542 if (!smmu.configCacheEnable)
543 return;
544
545 ConfigCache::Entry e;
546 e.valid = true;
547 e.sid = request.sid;
548 e.ssid = request.ssid;
549 e.stage1_en = tc.stage1Enable;
550 e.stage2_en = tc.stage2Enable;
551 e.ttb0 = tc.ttb0;
552 e.ttb1 = tc.ttb1;
553 e.asid = tc.asid;
554 e.httb = tc.httb;
555 e.vmid = tc.vmid;
556 e.stage1_tg = tc.stage1TranslGranule;
557 e.stage2_tg = tc.stage2TranslGranule;
558
559 doSemaphoreDown(yield, smmu.configSem);
560
561 DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
562
563 smmu.configCache.store(e);
564
565 doSemaphoreUp(smmu.configSem);
566}
567
568bool
569SMMUTranslationProcess::findConfig(Yield &yield,
570 TranslContext &tc,
571 TranslResult &tr)
572{
573 tc.stage1Enable = false;
574 tc.stage2Enable = false;
575
576 StreamTableEntry ste;
577 doReadSTE(yield, ste, request.sid);
578
579 switch (ste.dw0.config) {
580 case STE_CONFIG_BYPASS:
581 break;
582
583 case STE_CONFIG_STAGE1_ONLY:
584 tc.stage1Enable = true;
585 break;
586
587 case STE_CONFIG_STAGE2_ONLY:
588 tc.stage2Enable = true;
589 break;
590
591 case STE_CONFIG_STAGE1_AND_2:
592 tc.stage1Enable = true;
593 tc.stage2Enable = true;
594 break;
595
596 default:
597 panic("Bad or unimplemented STE config %d\n",
598 ste.dw0.config);
599 }
600
601
602 // Establish stage 2 context first since
603 // Context Descriptors can be in IPA space.
604 if (tc.stage2Enable) {
605 tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
606 tc.vmid = ste.dw2.s2vmid;
607 tc.stage2TranslGranule = ste.dw2.s2tg;
1/*
2 * Copyright (c) 2013, 2018-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Stan Czerniawski
38 */
39
40#include "dev/arm/smmu_v3_transl.hh"
41
42#include "debug/SMMUv3.hh"
43#include "debug/SMMUv3Hazard.hh"
44#include "dev/arm/amba.hh"
45#include "dev/arm/smmu_v3.hh"
46#include "sim/system.hh"
47
48SMMUTranslRequest
49SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
50{
51 SMMUTranslRequest req;
52 req.addr = pkt->getAddr();
53 req.size = pkt->getSize();
54 req.sid = pkt->req->streamId();
55 req.ssid = pkt->req->hasSubstreamId() ?
56 pkt->req->substreamId() : 0;
57 req.isWrite = pkt->isWrite();
58 req.isPrefetch = false;
59 req.isAtsRequest = ats;
60 req.pkt = pkt;
61
62 return req;
63}
64
65SMMUTranslRequest
66SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
67{
68 SMMUTranslRequest req;
69 req.addr = addr;
70 req.size = 0;
71 req.sid = sid;
72 req.ssid = ssid;
73 req.isWrite = false;
74 req.isPrefetch = true;
75 req.isAtsRequest = false;
76 req.pkt = NULL;
77
78 return req;
79}
80
81SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
82 SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
83 :
84 SMMUProcess(name, _smmu),
85 ifc(_ifc)
86{
87 // Decrease number of pending translation slots on the slave interface
88 assert(ifc.xlateSlotsRemaining > 0);
89 ifc.xlateSlotsRemaining--;
90 reinit();
91}
92
93SMMUTranslationProcess::~SMMUTranslationProcess()
94{
95 // Increase number of pending translation slots on the slave interface
96 ifc.xlateSlotsRemaining++;
97 // If no more SMMU translations are pending (all slots available),
98 // signal SMMU Slave Interface as drained
99 if (ifc.xlateSlotsRemaining == ifc.params()->xlate_slots) {
100 ifc.signalDrainDone();
101 }
102}
103
104void
105SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
106{
107 request = req;
108
109 reinit();
110}
111
112void
113SMMUTranslationProcess::resumeTransaction()
114{
115 assert(smmu.system.isTimingMode());
116
117 assert(!"Stalls are broken");
118
119 Tick resumeTick = curTick();
120
121 (void) resumeTick;
122 DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
123 resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
124
125 beginTransaction(request);
126
127 smmu.runProcessTiming(this, request.pkt);
128}
129
130void
131SMMUTranslationProcess::main(Yield &yield)
132{
133 // Hack:
134 // The coroutine starts running as soon as it's created.
135 // But we need to wait for request data esp. in atomic mode.
136 SMMUAction a;
137 a.type = ACTION_INITIAL_NOP;
138 a.pkt = NULL;
139 yield(a);
140
141 const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
142
143 if ((request.addr + request.size) > next4k)
144 panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
145 request.addr, request.size);
146
147
148 unsigned numSlaveBeats = request.isWrite ?
149 (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
150
151 doSemaphoreDown(yield, ifc.slavePortSem);
152 doDelay(yield, Cycles(numSlaveBeats));
153 doSemaphoreUp(ifc.slavePortSem);
154
155
156 recvTick = curTick();
157
158
159 if (!(smmu.regs.cr0 & 0x1)) {
160 // SMMU disabled
161 doDelay(yield, Cycles(1));
162 completeTransaction(yield, bypass(request.addr));
163 return;
164 }
165
166 TranslResult tr;
167 bool wasPrefetched = false;
168
169 if (request.isPrefetch) {
170 // Abort prefetch if:
171 // - there's already a transaction looking up the same 4k page, OR
172 // - requested address is already in the TLB.
173 if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
174 completePrefetch(yield); // this never returns
175
176 hazard4kRegister();
177
178 tr = smmuTranslation(yield);
179
180 if (tr.fault == FAULT_NONE)
181 ifcTLBUpdate(yield, tr);
182
183 hazard4kRelease();
184
185 completePrefetch(yield);
186 } else {
187 hazardIdRegister();
188
189 if (!microTLBLookup(yield, tr)) {
190 bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
191 if (!hit) {
192 while (!hit && hazard4kCheck()) {
193 hazard4kHold(yield);
194 hit = ifcTLBLookup(yield, tr, wasPrefetched);
195 }
196 }
197
198 // Issue prefetch if:
199 // - there was a TLB hit and the entry was prefetched, OR
200 // - TLB miss was successfully serviced
201 if (hit) {
202 if (wasPrefetched)
203 issuePrefetch(next4k);
204 } else {
205 hazard4kRegister();
206
207 tr = smmuTranslation(yield);
208
209 if (tr.fault == FAULT_NONE) {
210 ifcTLBUpdate(yield, tr);
211
212 issuePrefetch(next4k);
213 }
214
215 hazard4kRelease();
216 }
217
218 if (tr.fault == FAULT_NONE)
219 microTLBUpdate(yield, tr);
220 }
221
222 hazardIdHold(yield);
223 hazardIdRelease();
224
225 if (tr.fault != FAULT_NONE)
226 panic("fault\n");
227
228 completeTransaction(yield, tr);
229 }
230}
231
232SMMUTranslationProcess::TranslResult
233SMMUTranslationProcess::bypass(Addr addr) const
234{
235 TranslResult tr;
236 tr.fault = FAULT_NONE;
237 tr.addr = addr;
238 tr.addrMask = 0;
239 tr.writable = 1;
240
241 return tr;
242}
243
244SMMUTranslationProcess::TranslResult
245SMMUTranslationProcess::smmuTranslation(Yield &yield)
246{
247 TranslResult tr;
248
249 // Need SMMU credit to proceed
250 doSemaphoreDown(yield, smmu.transSem);
251
252 // Simulate pipelined IFC->SMMU link
253 doSemaphoreDown(yield, smmu.ifcSmmuSem);
254 doDelay(yield, Cycles(1)); // serialize transactions
255 doSemaphoreUp(smmu.ifcSmmuSem);
256 doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
257
258 bool haveConfig = true;
259 if (!configCacheLookup(yield, context)) {
260 if(findConfig(yield, context, tr)) {
261 configCacheUpdate(yield, context);
262 } else {
263 haveConfig = false;
264 }
265 }
266
267 if (haveConfig && !smmuTLBLookup(yield, tr)) {
268 // SMMU main TLB miss
269
270 // Need PTW slot to proceed
271 doSemaphoreDown(yield, smmu.ptwSem);
272
273 // Page table walk
274 Tick ptwStartTick = curTick();
275
276 if (context.stage1Enable) {
277 tr = translateStage1And2(yield, request.addr);
278 } else if (context.stage2Enable) {
279 tr = translateStage2(yield, request.addr, true);
280 } else {
281 tr = bypass(request.addr);
282 }
283
284 if (context.stage1Enable || context.stage2Enable)
285 smmu.ptwTimeDist.sample(curTick() - ptwStartTick);
286
287 // Free PTW slot
288 doSemaphoreUp(smmu.ptwSem);
289
290 if (tr.fault == FAULT_NONE)
291 smmuTLBUpdate(yield, tr);
292 }
293
294 // Simulate pipelined SMMU->SLAVE INTERFACE link
295 doSemaphoreDown(yield, smmu.smmuIfcSem);
296 doDelay(yield, Cycles(1)); // serialize transactions
297 doSemaphoreUp(smmu.smmuIfcSem);
298 doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
299
300 // return SMMU credit
301 doSemaphoreUp(smmu.transSem);
302
303 return tr;
304}
305
306bool
307SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
308{
309 if (!ifc.microTLBEnable)
310 return false;
311
312 doSemaphoreDown(yield, ifc.microTLBSem);
313 doDelay(yield, ifc.microTLBLat);
314 const SMMUTLB::Entry *e =
315 ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
316 doSemaphoreUp(ifc.microTLBSem);
317
318 if (!e) {
319 DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
320 request.addr, request.sid, request.ssid);
321
322 return false;
323 }
324
325 DPRINTF(SMMUv3,
326 "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
327 request.addr, e->vaMask, request.sid, request.ssid, e->pa);
328
329 tr.fault = FAULT_NONE;
330 tr.addr = e->pa + (request.addr & ~e->vaMask);;
331 tr.addrMask = e->vaMask;
332 tr.writable = e->permissions;
333
334 return true;
335}
336
337bool
338SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
339 bool &wasPrefetched)
340{
341 if (!ifc.mainTLBEnable)
342 return false;
343
344 doSemaphoreDown(yield, ifc.mainTLBSem);
345 doDelay(yield, ifc.mainTLBLat);
346 const SMMUTLB::Entry *e =
347 ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
348 doSemaphoreUp(ifc.mainTLBSem);
349
350 if (!e) {
351 DPRINTF(SMMUv3,
352 "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
353 request.addr, request.sid, request.ssid);
354
355 return false;
356 }
357
358 DPRINTF(SMMUv3,
359 "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
360 "paddr=%#x\n", request.addr, e->vaMask, request.sid,
361 request.ssid, e->pa);
362
363 tr.fault = FAULT_NONE;
364 tr.addr = e->pa + (request.addr & ~e->vaMask);;
365 tr.addrMask = e->vaMask;
366 tr.writable = e->permissions;
367 wasPrefetched = e->prefetched;
368
369 return true;
370}
371
372bool
373SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
374{
375 if (!smmu.tlbEnable)
376 return false;
377
378 doSemaphoreDown(yield, smmu.tlbSem);
379 doDelay(yield, smmu.tlbLat);
380 const ARMArchTLB::Entry *e =
381 smmu.tlb.lookup(request.addr, context.asid, context.vmid);
382 doSemaphoreUp(smmu.tlbSem);
383
384 if (!e) {
385 DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
386 request.addr, context.asid, context.vmid);
387
388 return false;
389 }
390
391 DPRINTF(SMMUv3,
392 "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
393 request.addr, e->vaMask, context.asid, context.vmid, e->pa);
394
395 tr.fault = FAULT_NONE;
396 tr.addr = e->pa + (request.addr & ~e->vaMask);;
397 tr.addrMask = e->vaMask;
398 tr.writable = e->permissions;
399
400 return true;
401}
402
403void
404SMMUTranslationProcess::microTLBUpdate(Yield &yield,
405 const TranslResult &tr)
406{
407 assert(tr.fault == FAULT_NONE);
408
409 if (!ifc.microTLBEnable)
410 return;
411
412 SMMUTLB::Entry e;
413 e.valid = true;
414 e.prefetched = false;
415 e.sid = request.sid;
416 e.ssid = request.ssid;
417 e.vaMask = tr.addrMask;
418 e.va = request.addr & e.vaMask;
419 e.pa = tr.addr & e.vaMask;
420 e.permissions = tr.writable;
421 e.asid = context.asid;
422 e.vmid = context.vmid;
423
424 doSemaphoreDown(yield, ifc.microTLBSem);
425
426 DPRINTF(SMMUv3,
427 "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
428 e.va, e.vaMask, e.pa, e.sid, e.ssid);
429
430 ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);
431
432 doSemaphoreUp(ifc.microTLBSem);
433}
434
435void
436SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
437 const TranslResult &tr)
438{
439 assert(tr.fault == FAULT_NONE);
440
441 if (!ifc.mainTLBEnable)
442 return;
443
444 SMMUTLB::Entry e;
445 e.valid = true;
446 e.prefetched = request.isPrefetch;
447 e.sid = request.sid;
448 e.ssid = request.ssid;
449 e.vaMask = tr.addrMask;
450 e.va = request.addr & e.vaMask;
451 e.pa = tr.addr & e.vaMask;
452 e.permissions = tr.writable;
453 e.asid = context.asid;
454 e.vmid = context.vmid;
455
456 SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
457 if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
458 alloc = request.isPrefetch ?
459 SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;
460
461 doSemaphoreDown(yield, ifc.mainTLBSem);
462
463 DPRINTF(SMMUv3,
464 "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
465 "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
466
467 ifc.mainTLB->store(e, alloc);
468
469 doSemaphoreUp(ifc.mainTLBSem);
470}
471
472void
473SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
474 const TranslResult &tr)
475{
476 assert(tr.fault == FAULT_NONE);
477
478 if (!smmu.tlbEnable)
479 return;
480
481 ARMArchTLB::Entry e;
482 e.valid = true;
483 e.vaMask = tr.addrMask;
484 e.va = request.addr & e.vaMask;
485 e.asid = context.asid;
486 e.vmid = context.vmid;
487 e.pa = tr.addr & e.vaMask;
488 e.permissions = tr.writable;
489
490 doSemaphoreDown(yield, smmu.tlbSem);
491
492 DPRINTF(SMMUv3,
493 "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
494 e.va, e.vaMask, e.pa, e.asid, e.vmid);
495
496 smmu.tlb.store(e);
497
498 doSemaphoreUp(smmu.tlbSem);
499}
500
501bool
502SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
503{
504 if (!smmu.configCacheEnable)
505 return false;
506
507 doSemaphoreDown(yield, smmu.configSem);
508 doDelay(yield, smmu.configLat);
509 const ConfigCache::Entry *e =
510 smmu.configCache.lookup(request.sid, request.ssid);
511 doSemaphoreUp(smmu.configSem);
512
513 if (!e) {
514 DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
515 request.sid, request.ssid);
516
517 return false;
518 }
519
520 DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
521 request.sid, request.ssid, e->ttb0, e->asid);
522
523 tc.stage1Enable = e->stage1_en;
524 tc.stage2Enable = e->stage2_en;
525
526 tc.ttb0 = e->ttb0;
527 tc.ttb1 = e->ttb1;
528 tc.asid = e->asid;
529 tc.httb = e->httb;
530 tc.vmid = e->vmid;
531
532 tc.stage1TranslGranule = e->stage1_tg;
533 tc.stage2TranslGranule = e->stage2_tg;
534
535 return true;
536}
537
538void
539SMMUTranslationProcess::configCacheUpdate(Yield &yield,
540 const TranslContext &tc)
541{
542 if (!smmu.configCacheEnable)
543 return;
544
545 ConfigCache::Entry e;
546 e.valid = true;
547 e.sid = request.sid;
548 e.ssid = request.ssid;
549 e.stage1_en = tc.stage1Enable;
550 e.stage2_en = tc.stage2Enable;
551 e.ttb0 = tc.ttb0;
552 e.ttb1 = tc.ttb1;
553 e.asid = tc.asid;
554 e.httb = tc.httb;
555 e.vmid = tc.vmid;
556 e.stage1_tg = tc.stage1TranslGranule;
557 e.stage2_tg = tc.stage2TranslGranule;
558
559 doSemaphoreDown(yield, smmu.configSem);
560
561 DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
562
563 smmu.configCache.store(e);
564
565 doSemaphoreUp(smmu.configSem);
566}
567
568bool
569SMMUTranslationProcess::findConfig(Yield &yield,
570 TranslContext &tc,
571 TranslResult &tr)
572{
573 tc.stage1Enable = false;
574 tc.stage2Enable = false;
575
576 StreamTableEntry ste;
577 doReadSTE(yield, ste, request.sid);
578
579 switch (ste.dw0.config) {
580 case STE_CONFIG_BYPASS:
581 break;
582
583 case STE_CONFIG_STAGE1_ONLY:
584 tc.stage1Enable = true;
585 break;
586
587 case STE_CONFIG_STAGE2_ONLY:
588 tc.stage2Enable = true;
589 break;
590
591 case STE_CONFIG_STAGE1_AND_2:
592 tc.stage1Enable = true;
593 tc.stage2Enable = true;
594 break;
595
596 default:
597 panic("Bad or unimplemented STE config %d\n",
598 ste.dw0.config);
599 }
600
601
602 // Establish stage 2 context first since
603 // Context Descriptors can be in IPA space.
604 if (tc.stage2Enable) {
605 tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
606 tc.vmid = ste.dw2.s2vmid;
607 tc.stage2TranslGranule = ste.dw2.s2tg;
608 tc.s2t0sz = ste.dw2.s2t0sz;
608 } else {
609 tc.httb = 0xdeadbeef;
610 tc.vmid = 0;
611 tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
609 } else {
610 tc.httb = 0xdeadbeef;
611 tc.vmid = 0;
612 tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
613 tc.s2t0sz = 0;
612 }
613
614
615 // Now fetch stage 1 config.
616 if (context.stage1Enable) {
617 ContextDescriptor cd;
618 doReadCD(yield, cd, ste, request.sid, request.ssid);
619
620 tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
621 tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
622 tc.asid = cd.dw0.asid;
623 tc.stage1TranslGranule = cd.dw0.tg0;
614 }
615
616
617 // Now fetch stage 1 config.
618 if (context.stage1Enable) {
619 ContextDescriptor cd;
620 doReadCD(yield, cd, ste, request.sid, request.ssid);
621
622 tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
623 tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
624 tc.asid = cd.dw0.asid;
625 tc.stage1TranslGranule = cd.dw0.tg0;
626 tc.t0sz = cd.dw0.t0sz;
624 } else {
625 tc.ttb0 = 0xcafebabe;
626 tc.ttb1 = 0xcafed00d;
627 tc.asid = 0;
628 tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
627 } else {
628 tc.ttb0 = 0xcafebabe;
629 tc.ttb1 = 0xcafed00d;
630 tc.asid = 0;
631 tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
632 tc.t0sz = 0;
629 }
630
631 return true;
632}
633
/**
 * Look up a cached page-table-walk step for @p addr at the given
 * translation stage and level.
 *
 * On a hit @p walkEntry is set to the cached entry; on a miss, or when
 * the walk cache is disabled for this (stage, level) combination, it is
 * left untouched by the lookup (caller initialises it to NULL). The
 * semaphore/delay pair models walk cache access latency.
 */
void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    // Stage 2 trace messages are indented to visually nest them under
    // the stage 1 walk.
    const char *indent = stage==2 ? " " : "";
    (void) indent; // this is only used in DPRINTFs

    // Pick the page table geometry for the stage being walked.
    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    // Bitmask of levels that are cacheable at this stage; zero disables
    // the lookup entirely.
    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}
674
/**
 * Insert one page-table-walk step into the walk cache.
 *
 * @param va          input address the step translates
 * @param vaMask      mask selecting the bits covered at this level
 * @param pa          next-level table pointer, or the final output
 *                    address when @p leaf is true
 * @param stage       translation stage (1 or 2)
 * @param level       table level of this step
 * @param leaf        true if this is the last (page/block) level
 * @param permissions writability of the mapping (leaf entries only)
 *
 * No-op unless the walk cache is enabled and configured to hold entries
 * for this (stage, level) combination.
 */
void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        // Stage 2 entries are not ASID-tagged; only the VMID applies.
        e.asid = stage==1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
                        "tpa=%#x leaf=%s (S%d, L%d)\n",
                e.stage==2 ? " " : "",
                e.va, e.vaMask, e.asid, e.vmid,
                e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}
710
/*
 * Please note:
 * This does not deal with the case where stage 1 page size
 * is larger than stage 2 page size.
 */

/**
 * Perform a stage 1 page table walk, nested in stage 2 when it is
 * enabled: intermediate table pointers produced by the walk are IPAs and
 * are run through translateStage2() before being dereferenced. The final
 * stage 1 output address is likewise translated and the two results are
 * merged via combineTranslations(). Partial and final steps are inserted
 * into the walk cache.
 *
 * @param addr    input (virtual) address being translated
 * @param pt_ops  stage 1 page table geometry
 * @param level   table level to start the walk at
 * @param walkPtr physical address of the starting table
 */
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    // Charge one cycle of SMMU processing time before the walk.
    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        // One cycle of processing per fetched descriptor.
        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        // Write permission is only checked on leaf entries.
        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        // Non-leaf pointers are IPAs when stage 2 is on; translate them
        // to PAs before the next iteration dereferences them.
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        // Cache the intermediate (non-leaf) step; permissions don't
        // apply until the leaf.
        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    // Final stage 1 output is an IPA under stage 2; translate and merge.
    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    // Cache the final (leaf) translation with its write permission.
    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}
799
800SMMUTranslationProcess::TranslResult
801SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
802 const PageTableOps *pt_ops,
803 unsigned level, Addr walkPtr)
804{
805 PageTableOps::pte_t pte;
806
807 doSemaphoreDown(yield, smmu.cycleSem);
808 doDelay(yield, Cycles(1));
809 doSemaphoreUp(smmu.cycleSem);
810
811 for (; level <= pt_ops->lastLevel(); level++) {
812 Addr pte_addr = walkPtr + pt_ops->index(addr, level);
813
814 DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
815 level, pte_addr);
816
817 doReadPTE(yield, addr, pte_addr, &pte, 2, level);
818
819 DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
820 level, pte, pte_addr);
821
822 doSemaphoreDown(yield, smmu.cycleSem);
823 doDelay(yield, Cycles(1));
824 doSemaphoreUp(smmu.cycleSem);
825
826 bool valid = pt_ops->isValid(pte, level);
827 bool leaf = pt_ops->isLeaf(pte, level);
828
829 if (!valid) {
830 DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
831
832 TranslResult tr;
833 tr.fault = FAULT_TRANSLATION;
834 return tr;
835 }
836
837 if (valid && leaf && request.isWrite &&
838 !pt_ops->isWritable(pte, level, true))
839 {
840 DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
841
842 TranslResult tr;
843 tr.fault = FAULT_PERMISSION;
844 return tr;
845 }
846
847 walkPtr = pt_ops->nextLevelPointer(pte, level);
848
849 if (final_tr || smmu.walkCacheNonfinalEnable)
850 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
851 2, level, leaf,
852 leaf ? pt_ops->isWritable(pte, level, true) : 0);
853 if (leaf)
854 break;
855 }
856
857 TranslResult tr;
858 tr.fault = FAULT_NONE;
859 tr.addrMask = pt_ops->pageMask(pte, level);
860 tr.addr = walkPtr + (addr & ~tr.addrMask);
861 tr.writable = pt_ops->isWritable(pte, level, true);
862
863 return tr;
864}
865
866SMMUTranslationProcess::TranslResult
867SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
868{
869 const PageTableOps *pt_ops =
870 smmu.getPageTableOps(context.stage1TranslGranule);
871
872 const WalkCache::Entry *walk_ep = NULL;
873 unsigned level;
874
875 // Level here is actually (level+1) so we can count down
876 // to 0 using unsigned int.
877 for (level = pt_ops->lastLevel() + 1;
633 }
634
635 return true;
636}
637
638void
639SMMUTranslationProcess::walkCacheLookup(
640 Yield &yield,
641 const WalkCache::Entry *&walkEntry,
642 Addr addr, uint16_t asid, uint16_t vmid,
643 unsigned stage, unsigned level)
644{
645 const char *indent = stage==2 ? " " : "";
646 (void) indent; // this is only used in DPRINTFs
647
648 const PageTableOps *pt_ops =
649 stage == 1 ?
650 smmu.getPageTableOps(context.stage1TranslGranule) :
651 smmu.getPageTableOps(context.stage2TranslGranule);
652
653 unsigned walkCacheLevels =
654 smmu.walkCacheEnable ?
655 (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
656 0;
657
658 if ((1 << level) & walkCacheLevels) {
659 doSemaphoreDown(yield, smmu.walkSem);
660 doDelay(yield, smmu.walkLat);
661
662 walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
663 asid, vmid, stage, level);
664
665 if (walkEntry) {
666 DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
667 "base=%#x (S%d, L%d)\n",
668 indent, addr, asid, vmid, walkEntry->pa, stage, level);
669 } else {
670 DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
671 "(S%d, L%d)\n",
672 indent, addr, asid, vmid, stage, level);
673 }
674
675 doSemaphoreUp(smmu.walkSem);
676 }
677}
678
679void
680SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
681 Addr vaMask, Addr pa,
682 unsigned stage, unsigned level,
683 bool leaf, uint8_t permissions)
684{
685 unsigned walkCacheLevels =
686 stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;
687
688 if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
689 WalkCache::Entry e;
690 e.valid = true;
691 e.va = va;
692 e.vaMask = vaMask;
693 e.asid = stage==1 ? context.asid : 0;
694 e.vmid = context.vmid;
695 e.stage = stage;
696 e.level = level;
697 e.leaf = leaf;
698 e.pa = pa;
699 e.permissions = permissions;
700
701 doSemaphoreDown(yield, smmu.walkSem);
702
703 DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
704 "tpa=%#x leaf=%s (S%d, L%d)\n",
705 e.stage==2 ? " " : "",
706 e.va, e.vaMask, e.asid, e.vmid,
707 e.pa, e.leaf, e.stage, e.level);
708
709 smmu.walkCache.store(e);
710
711 doSemaphoreUp(smmu.walkSem);
712 }
713}
714
715/*
716 * Please note:
717 * This does not deal with the case where stage 1 page size
718 * is larger than stage 2 page size.
719 */
720SMMUTranslationProcess::TranslResult
721SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
722 const PageTableOps *pt_ops,
723 unsigned level, Addr walkPtr)
724{
725 PageTableOps::pte_t pte = 0;
726
727 doSemaphoreDown(yield, smmu.cycleSem);
728 doDelay(yield, Cycles(1));
729 doSemaphoreUp(smmu.cycleSem);
730
731 for (; level <= pt_ops->lastLevel(); level++) {
732 Addr pte_addr = walkPtr + pt_ops->index(addr, level);
733
734 DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
735 level, pte_addr);
736
737 doReadPTE(yield, addr, pte_addr, &pte, 1, level);
738
739 DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
740 level, pte, pte_addr);
741
742 doSemaphoreDown(yield, smmu.cycleSem);
743 doDelay(yield, Cycles(1));
744 doSemaphoreUp(smmu.cycleSem);
745
746 bool valid = pt_ops->isValid(pte, level);
747 bool leaf = pt_ops->isLeaf(pte, level);
748
749 if (!valid) {
750 DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");
751
752 TranslResult tr;
753 tr.fault = FAULT_TRANSLATION;
754 return tr;
755 }
756
757 if (valid && leaf && request.isWrite &&
758 !pt_ops->isWritable(pte, level, false))
759 {
760 DPRINTF(SMMUv3, "S1 page not writable - fault\n");
761
762 TranslResult tr;
763 tr.fault = FAULT_PERMISSION;
764 return tr;
765 }
766
767 walkPtr = pt_ops->nextLevelPointer(pte, level);
768
769 if (leaf)
770 break;
771
772 if (context.stage2Enable) {
773 TranslResult s2tr = translateStage2(yield, walkPtr, false);
774 if (s2tr.fault != FAULT_NONE)
775 return s2tr;
776
777 walkPtr = s2tr.addr;
778 }
779
780 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
781 1, level, leaf, 0);
782 }
783
784 TranslResult tr;
785 tr.fault = FAULT_NONE;
786 tr.addrMask = pt_ops->pageMask(pte, level);
787 tr.addr = walkPtr + (addr & ~tr.addrMask);
788 tr.writable = pt_ops->isWritable(pte, level, false);
789
790 if (context.stage2Enable) {
791 TranslResult s2tr = translateStage2(yield, tr.addr, true);
792 if (s2tr.fault != FAULT_NONE)
793 return s2tr;
794
795 tr = combineTranslations(tr, s2tr);
796 }
797
798 walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
799 1, level, true, tr.writable);
800
801 return tr;
802}
803
804SMMUTranslationProcess::TranslResult
805SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
806 const PageTableOps *pt_ops,
807 unsigned level, Addr walkPtr)
808{
809 PageTableOps::pte_t pte;
810
811 doSemaphoreDown(yield, smmu.cycleSem);
812 doDelay(yield, Cycles(1));
813 doSemaphoreUp(smmu.cycleSem);
814
815 for (; level <= pt_ops->lastLevel(); level++) {
816 Addr pte_addr = walkPtr + pt_ops->index(addr, level);
817
818 DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
819 level, pte_addr);
820
821 doReadPTE(yield, addr, pte_addr, &pte, 2, level);
822
823 DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
824 level, pte, pte_addr);
825
826 doSemaphoreDown(yield, smmu.cycleSem);
827 doDelay(yield, Cycles(1));
828 doSemaphoreUp(smmu.cycleSem);
829
830 bool valid = pt_ops->isValid(pte, level);
831 bool leaf = pt_ops->isLeaf(pte, level);
832
833 if (!valid) {
834 DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
835
836 TranslResult tr;
837 tr.fault = FAULT_TRANSLATION;
838 return tr;
839 }
840
841 if (valid && leaf && request.isWrite &&
842 !pt_ops->isWritable(pte, level, true))
843 {
844 DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
845
846 TranslResult tr;
847 tr.fault = FAULT_PERMISSION;
848 return tr;
849 }
850
851 walkPtr = pt_ops->nextLevelPointer(pte, level);
852
853 if (final_tr || smmu.walkCacheNonfinalEnable)
854 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
855 2, level, leaf,
856 leaf ? pt_ops->isWritable(pte, level, true) : 0);
857 if (leaf)
858 break;
859 }
860
861 TranslResult tr;
862 tr.fault = FAULT_NONE;
863 tr.addrMask = pt_ops->pageMask(pte, level);
864 tr.addr = walkPtr + (addr & ~tr.addrMask);
865 tr.writable = pt_ops->isWritable(pte, level, true);
866
867 return tr;
868}
869
870SMMUTranslationProcess::TranslResult
871SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
872{
873 const PageTableOps *pt_ops =
874 smmu.getPageTableOps(context.stage1TranslGranule);
875
876 const WalkCache::Entry *walk_ep = NULL;
877 unsigned level;
878
879 // Level here is actually (level+1) so we can count down
880 // to 0 using unsigned int.
881 for (level = pt_ops->lastLevel() + 1;
878 level > pt_ops->firstLevel();
882 level > pt_ops->firstLevel(context.t0sz);
879 level--)
880 {
881 walkCacheLookup(yield, walk_ep, addr,
882 context.asid, context.vmid, 1, level-1);
883
884 if (walk_ep)
885 break;
886 }
887
888 // Correct level (see above).
889 level -= 1;
890
891 TranslResult tr;
892 if (walk_ep) {
893 if (walk_ep->leaf) {
894 tr.fault = FAULT_NONE;
895 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
896 tr.addrMask = walk_ep->vaMask;
897 tr.writable = walk_ep->permissions;
898 } else {
899 tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
900 }
901 } else {
902 Addr table_addr = context.ttb0;
903 if (context.stage2Enable) {
904 TranslResult s2tr = translateStage2(yield, table_addr, false);
905 if (s2tr.fault != FAULT_NONE)
906 return s2tr;
907
908 table_addr = s2tr.addr;
909 }
910
883 level--)
884 {
885 walkCacheLookup(yield, walk_ep, addr,
886 context.asid, context.vmid, 1, level-1);
887
888 if (walk_ep)
889 break;
890 }
891
892 // Correct level (see above).
893 level -= 1;
894
895 TranslResult tr;
896 if (walk_ep) {
897 if (walk_ep->leaf) {
898 tr.fault = FAULT_NONE;
899 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
900 tr.addrMask = walk_ep->vaMask;
901 tr.writable = walk_ep->permissions;
902 } else {
903 tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
904 }
905 } else {
906 Addr table_addr = context.ttb0;
907 if (context.stage2Enable) {
908 TranslResult s2tr = translateStage2(yield, table_addr, false);
909 if (s2tr.fault != FAULT_NONE)
910 return s2tr;
911
912 table_addr = s2tr.addr;
913 }
914
911 tr = walkStage1And2(yield, addr, pt_ops, pt_ops->firstLevel(),
915 tr = walkStage1And2(yield, addr, pt_ops,
916 pt_ops->firstLevel(context.t0sz),
912 table_addr);
913 }
914
915 if (tr.fault == FAULT_NONE)
916 DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
917
918 return tr;
919}
920
921SMMUTranslationProcess::TranslResult
922SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
923{
924 const PageTableOps *pt_ops =
925 smmu.getPageTableOps(context.stage2TranslGranule);
926
927 const IPACache::Entry *ipa_ep = NULL;
928 if (smmu.ipaCacheEnable) {
929 doSemaphoreDown(yield, smmu.ipaSem);
930 doDelay(yield, smmu.ipaLat);
931 ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
932 doSemaphoreUp(smmu.ipaSem);
933 }
934
935 if (ipa_ep) {
936 TranslResult tr;
937 tr.fault = FAULT_NONE;
938 tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
939 tr.addrMask = ipa_ep->ipaMask;
940 tr.writable = ipa_ep->permissions;
941
942 DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
943 addr, context.vmid, tr.addr);
944
945 return tr;
946 } else if (smmu.ipaCacheEnable) {
947 DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
948 addr, context.vmid);
949 }
950
951 const WalkCache::Entry *walk_ep = NULL;
917 table_addr);
918 }
919
920 if (tr.fault == FAULT_NONE)
921 DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
922
923 return tr;
924}
925
926SMMUTranslationProcess::TranslResult
927SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
928{
929 const PageTableOps *pt_ops =
930 smmu.getPageTableOps(context.stage2TranslGranule);
931
932 const IPACache::Entry *ipa_ep = NULL;
933 if (smmu.ipaCacheEnable) {
934 doSemaphoreDown(yield, smmu.ipaSem);
935 doDelay(yield, smmu.ipaLat);
936 ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
937 doSemaphoreUp(smmu.ipaSem);
938 }
939
940 if (ipa_ep) {
941 TranslResult tr;
942 tr.fault = FAULT_NONE;
943 tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
944 tr.addrMask = ipa_ep->ipaMask;
945 tr.writable = ipa_ep->permissions;
946
947 DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
948 addr, context.vmid, tr.addr);
949
950 return tr;
951 } else if (smmu.ipaCacheEnable) {
952 DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
953 addr, context.vmid);
954 }
955
956 const WalkCache::Entry *walk_ep = NULL;
952 unsigned level = pt_ops->firstLevel();
957 unsigned level = pt_ops->firstLevel(context.s2t0sz);
953
954 if (final_tr || smmu.walkCacheNonfinalEnable) {
955 // Level here is actually (level+1) so we can count down
956 // to 0 using unsigned int.
957 for (level = pt_ops->lastLevel() + 1;
958
959 if (final_tr || smmu.walkCacheNonfinalEnable) {
960 // Level here is actually (level+1) so we can count down
961 // to 0 using unsigned int.
962 for (level = pt_ops->lastLevel() + 1;
958 level > pt_ops->firstLevel();
963 level > pt_ops->firstLevel(context.s2t0sz);
959 level--)
960 {
961 walkCacheLookup(yield, walk_ep, addr,
962 0, context.vmid, 2, level-1);
963
964 if (walk_ep)
965 break;
966 }
967
968 // Correct level (see above).
969 level -= 1;
970 }
971
972 TranslResult tr;
973 if (walk_ep) {
974 if (walk_ep->leaf) {
975 tr.fault = FAULT_NONE;
976 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
977 tr.addrMask = walk_ep->vaMask;
978 tr.writable = walk_ep->permissions;
979 } else {
980 tr = walkStage2(yield, addr, final_tr, pt_ops,
981 level + 1, walk_ep->pa);
982 }
983 } else {
964 level--)
965 {
966 walkCacheLookup(yield, walk_ep, addr,
967 0, context.vmid, 2, level-1);
968
969 if (walk_ep)
970 break;
971 }
972
973 // Correct level (see above).
974 level -= 1;
975 }
976
977 TranslResult tr;
978 if (walk_ep) {
979 if (walk_ep->leaf) {
980 tr.fault = FAULT_NONE;
981 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
982 tr.addrMask = walk_ep->vaMask;
983 tr.writable = walk_ep->permissions;
984 } else {
985 tr = walkStage2(yield, addr, final_tr, pt_ops,
986 level + 1, walk_ep->pa);
987 }
988 } else {
984 tr = walkStage2(yield, addr, final_tr, pt_ops, pt_ops->firstLevel(),
989 tr = walkStage2(yield, addr, final_tr, pt_ops,
990 pt_ops->firstLevel(context.s2t0sz),
985 context.httb);
986 }
987
988 if (tr.fault == FAULT_NONE)
989 DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
990 context.stage1Enable ? "ip" : "v", addr, tr.addr);
991
992 if (smmu.ipaCacheEnable) {
993 IPACache::Entry e;
994 e.valid = true;
995 e.ipaMask = tr.addrMask;
996 e.ipa = addr & e.ipaMask;
997 e.pa = tr.addr & tr.addrMask;
998 e.permissions = tr.writable;
999 e.vmid = context.vmid;
1000
1001 doSemaphoreDown(yield, smmu.ipaSem);
1002 smmu.ipaCache.store(e);
1003 doSemaphoreUp(smmu.ipaSem);
1004 }
1005
1006 return tr;
1007}
1008
1009SMMUTranslationProcess::TranslResult
1010SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
1011 const TranslResult &s2tr) const
1012{
1013 if (s2tr.fault != FAULT_NONE)
1014 return s2tr;
1015
1016 assert(s1tr.fault == FAULT_NONE);
1017
1018 TranslResult tr;
1019 tr.fault = FAULT_NONE;
1020 tr.addr = s2tr.addr;
1021 tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1022 tr.writable = s1tr.writable & s2tr.writable;
1023
1024 return tr;
1025}
1026
1027bool
1028SMMUTranslationProcess::hazard4kCheck()
1029{
1030 Addr addr4k = request.addr & ~0xfffULL;
1031
1032 for (auto it = ifc.duplicateReqs.begin();
1033 it != ifc.duplicateReqs.end();
1034 ++it)
1035 {
1036 Addr other4k = (*it)->request.addr & ~0xfffULL;
1037 if (addr4k == other4k)
1038 return true;
1039 }
1040
1041 return false;
1042}
1043
/**
 * Register this request in the interface's same-4K-page hazard queue so
 * later requests to the same page can wait on it (see hazard4kHold /
 * hazard4kRelease).
 */
void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}
1052
/**
 * Block this request until no older queued request (one registered
 * before us in ifc.duplicateReqs) targets the same 4KB page. Each wakeup
 * restarts the scan from the front of the queue, because waiting may
 * have allowed entries (including the one we waited on) to be deleted.
 */
void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        // Only scan requests registered ahead of this one; stop at self.
        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}
1090
1091void
1092SMMUTranslationProcess::hazard4kRelease()
1093{
1094 DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1095 this, request.addr & ~0xfffULL);
1096
1097 std::list<SMMUTranslationProcess *>::iterator it;
1098
1099 for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1100 if (*it == this)
1101 break;
1102
1103 if (it == ifc.duplicateReqs.end())
1104 panic("hazard4kRelease: request not found");
1105
1106 ifc.duplicateReqs.erase(it);
1107
1108 doBroadcastSignal(ifc.duplicateReqRemoved);
1109}
1110
/**
 * Register this request in the per-order-ID dependency queue (reads and
 * writes are tracked separately) so later requests with the same AMBA
 * order ID can wait on it (see hazardIdHold / hazardIdRelease).
 */
void
SMMUTranslationProcess::hazardIdRegister()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);

    assert(orderId < SMMU_MAX_TRANS_ID);

    // Writes and reads have independent ordering chains.
    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    depReqs.push_back(this);
}
1125
1126void
1127SMMUTranslationProcess::hazardIdHold(Yield &yield)
1128{
1129 auto orderId = AMBA::orderId(request.pkt);
1130
1131 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1132
1133 std::list<SMMUTranslationProcess *> &depReqs =
1134 request.isWrite ?
1135 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1136 std::list<SMMUTranslationProcess *>::iterator it;
1137
1138 bool found_hazard;
1139
1140 do {
1141 found_hazard = false;
1142
1143 for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1144 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1145 this, orderId, *it);
1146
1147 if (AMBA::orderId((*it)->request.pkt) == orderId) {
1148 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1149 this, orderId, *it);
1150
1151 doWaitForSignal(yield, ifc.dependentReqRemoved);
1152
1153 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1154 this, orderId);
1155
1156 // This is to avoid checking *it!=this after doWaitForSignal()
1157 // since it could have been deleted.
1158 found_hazard = true;
1159 break;
1160 }
1161 }
1162 } while (found_hazard);
1163}
1164
/**
 * Remove this request from its per-order-ID dependency queue and wake
 * all requests waiting on order-ID hazards so they can re-check.
 * Panics if the request was never registered.
 */
void
SMMUTranslationProcess::hazardIdRelease()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);

    // Writes and reads have independent ordering chains.
    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
        if (*it == this)
            break;
    }

    if (it == depReqs.end())
        panic("hazardIdRelease: request not found");

    depReqs.erase(it);

    // Wake every waiter; they re-scan the queue themselves.
    doBroadcastSignal(ifc.dependentReqRemoved);
}
1189
/**
 * Spawn a prefetch translation for @p addr on the same stream as the
 * current request. Only issued in timing mode, when prefetch is enabled
 * on the interface, and when a translation slot is free.
 */
void
SMMUTranslationProcess::issuePrefetch(Addr addr)
{
    // Prefetching is a timing optimisation; meaningless in atomic mode.
    if (!smmu.system.isTimingMode())
        return;

    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
        return;

    // The new process is handed to the SMMU event machinery via
    // beginTransaction/scheduleWakeup; presumably it cleans itself up on
    // termination (see completePrefetch) — TODO confirm ownership.
    std::string proc_name = csprintf("%sprf", name());
    SMMUTranslationProcess *proc =
        new SMMUTranslationProcess(proc_name, smmu, ifc);

    proc->beginTransaction(
        SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
}
1207
/**
 * Finish a successfully translated transaction: model the master-port
 * occupancy, update statistics and write-buffer accounting, then yield
 * the appropriate action(s) to forward the translated packet (or the ATS
 * response) and finally send the slave-side response.
 */
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    // Number of beats the payload occupies on the master port; reads
    // are modelled as a single beat here.
    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    // Return the write-buffer slots this request consumed.
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    // Freed slots may unblock stalled slave-side requests.
    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        // ATS requests are answered directly with the translation.
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        // Normal requests are forwarded downstream with the translated
        // address.
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    // Rewrite the packet to the translated physical address.
    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        // The downstream response comes back through the yield; restore
        // the original address before responding to the slave side.
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}
1268
/**
 * Finish a prefetch transaction. Prefetches produce no response packet;
 * the process simply terminates after (possibly) warming the caches.
 */
void
SMMUTranslationProcess::completePrefetch(Yield &yield)
{
    SMMUAction a;
    a.type = ACTION_TERMINATE;
    a.pkt = NULL;
    a.ifc = &ifc;
    a.delay = 0;
    yield(a);
}
1279
/**
 * Append @p ev to the SMMU event queue in memory and raise the event
 * queue MSI. Panics if the queue is full or the MSI is not enabled
 * (graceful overflow handling is not modelled).
 */
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    // Queue full when advancing prod would catch up with cons.
    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    // Slot address = queue base + producer index * record size.
    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
            "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
            event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
            ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    // Signal the event by writing the MSI payload to the configured
    // doorbell address.
    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}
1309
/**
 * Fetch the Stream Table Entry for @p sid into @p ste, supporting both
 * the linear and the two-level stream table formats configured in
 * STRTAB_BASE_CFG. Panics on out-of-range SIDs, malformed descriptors,
 * or an invalid STE.
 */
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        // Two-level format: upper SID bits index an L1 table of
        // descriptors, each pointing at an L2 array of STEs.
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        if (split!= 7 && split!=8 && split!=16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);

        // Span encodes how many STEs the L2 array holds; 0 is invalid.
        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        // Linear format: SID indexes the table directly.
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, " STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, " STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, " STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, " STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, " STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, " STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, " STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
1376
1377void
1378SMMUTranslationProcess::doReadCD(Yield &yield,
1379 ContextDescriptor &cd,
1380 const StreamTableEntry &ste,
1381 uint32_t sid, uint32_t ssid)
1382{
1383 Addr cd_addr;
1384
1385 if (ste.dw0.s1cdmax == 0) {
1386 cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1387 } else {
1388 unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1389 if (ssid >= max_ssid)
1390 panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1391
1392 if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1393 ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1394 {
1395 unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1396
1397 uint64_t l2_ptr;
1398 uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1399 bits(ssid, 24, split) * sizeof(l2_ptr);
1400
1401 if (context.stage2Enable)
1402 l2_addr = translateStage2(yield, l2_addr, false).addr;
1403
1404 DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1405
1406 doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1407
1408 DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1409
1410 cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1411
1412 smmu.cdL1Fetches++;
1413 } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1414 cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1415 }
1416 }
1417
1418 if (context.stage2Enable)
1419 cd_addr = translateStage2(yield, cd_addr, false).addr;
1420
1421 DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1422
1423 doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1424
1425 DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1426 DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1427 DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1428 DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1429 DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1430 DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1431 DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1432 DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1433
1434
1435 if (!cd.dw0.valid)
1436 panic("CD @ %#x not valid\n", cd_addr);
1437
1438 smmu.cdFetches++;
1439}
1440
// Read configuration data (STE, CD or their level 1 descriptors) from
// memory. The sid/ssid arguments are currently unused — presumably kept
// in the signature so a configuration cache keyed by stream IDs can be
// hooked in here later (TODO confirm).
void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}
1448
1449void
1450SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
1451 void *ptr, unsigned stage,
1452 unsigned level)
1453{
1454 size_t pte_size = sizeof(PageTableOps::pte_t);
1455
1456 Addr mask = pte_size - 1;
1457 Addr base = addr & ~mask;
1458
1459 doRead(yield, base, ptr, pte_size);
1460}
991 context.httb);
992 }
993
994 if (tr.fault == FAULT_NONE)
995 DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
996 context.stage1Enable ? "ip" : "v", addr, tr.addr);
997
998 if (smmu.ipaCacheEnable) {
999 IPACache::Entry e;
1000 e.valid = true;
1001 e.ipaMask = tr.addrMask;
1002 e.ipa = addr & e.ipaMask;
1003 e.pa = tr.addr & tr.addrMask;
1004 e.permissions = tr.writable;
1005 e.vmid = context.vmid;
1006
1007 doSemaphoreDown(yield, smmu.ipaSem);
1008 smmu.ipaCache.store(e);
1009 doSemaphoreUp(smmu.ipaSem);
1010 }
1011
1012 return tr;
1013}
1014
1015SMMUTranslationProcess::TranslResult
1016SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
1017 const TranslResult &s2tr) const
1018{
1019 if (s2tr.fault != FAULT_NONE)
1020 return s2tr;
1021
1022 assert(s1tr.fault == FAULT_NONE);
1023
1024 TranslResult tr;
1025 tr.fault = FAULT_NONE;
1026 tr.addr = s2tr.addr;
1027 tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1028 tr.writable = s1tr.writable & s2tr.writable;
1029
1030 return tr;
1031}
1032
1033bool
1034SMMUTranslationProcess::hazard4kCheck()
1035{
1036 Addr addr4k = request.addr & ~0xfffULL;
1037
1038 for (auto it = ifc.duplicateReqs.begin();
1039 it != ifc.duplicateReqs.end();
1040 ++it)
1041 {
1042 Addr other4k = (*it)->request.addr & ~0xfffULL;
1043 if (addr4k == other4k)
1044 return true;
1045 }
1046
1047 return false;
1048}
1049
1050void
1051SMMUTranslationProcess::hazard4kRegister()
1052{
1053 DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
1054 this, request.addr & ~0xfffULL);
1055
1056 ifc.duplicateReqs.push_back(this);
1057}
1058
// Stall this translation while any request queued ahead of it in the
// interface's duplicate-request list targets the same 4K page, so that
// accesses to a page stay ordered while its translation is in flight.
void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        // Only requests queued before this one matter: the scan stops
        // as soon as *it == this.
        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                // Sleep until hazard4kRelease() broadcasts, then rescan
                // the queue from scratch.
                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}
1096
1097void
1098SMMUTranslationProcess::hazard4kRelease()
1099{
1100 DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1101 this, request.addr & ~0xfffULL);
1102
1103 std::list<SMMUTranslationProcess *>::iterator it;
1104
1105 for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1106 if (*it == this)
1107 break;
1108
1109 if (it == ifc.duplicateReqs.end())
1110 panic("hazard4kRelease: request not found");
1111
1112 ifc.duplicateReqs.erase(it);
1113
1114 doBroadcastSignal(ifc.duplicateReqRemoved);
1115}
1116
1117void
1118SMMUTranslationProcess::hazardIdRegister()
1119{
1120 auto orderId = AMBA::orderId(request.pkt);
1121
1122 DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1123
1124 assert(orderId < SMMU_MAX_TRANS_ID);
1125
1126 std::list<SMMUTranslationProcess *> &depReqs =
1127 request.isWrite ?
1128 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1129 depReqs.push_back(this);
1130}
1131
1132void
1133SMMUTranslationProcess::hazardIdHold(Yield &yield)
1134{
1135 auto orderId = AMBA::orderId(request.pkt);
1136
1137 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1138
1139 std::list<SMMUTranslationProcess *> &depReqs =
1140 request.isWrite ?
1141 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1142 std::list<SMMUTranslationProcess *>::iterator it;
1143
1144 bool found_hazard;
1145
1146 do {
1147 found_hazard = false;
1148
1149 for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1150 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1151 this, orderId, *it);
1152
1153 if (AMBA::orderId((*it)->request.pkt) == orderId) {
1154 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1155 this, orderId, *it);
1156
1157 doWaitForSignal(yield, ifc.dependentReqRemoved);
1158
1159 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1160 this, orderId);
1161
1162 // This is to avoid checking *it!=this after doWaitForSignal()
1163 // since it could have been deleted.
1164 found_hazard = true;
1165 break;
1166 }
1167 }
1168 } while (found_hazard);
1169}
1170
1171void
1172SMMUTranslationProcess::hazardIdRelease()
1173{
1174 auto orderId = AMBA::orderId(request.pkt);
1175
1176 DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1177
1178 std::list<SMMUTranslationProcess *> &depReqs =
1179 request.isWrite ?
1180 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1181 std::list<SMMUTranslationProcess *>::iterator it;
1182
1183 for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1184 if (*it == this)
1185 break;
1186 }
1187
1188 if (it == depReqs.end())
1189 panic("hazardIdRelease: request not found");
1190
1191 depReqs.erase(it);
1192
1193 doBroadcastSignal(ifc.dependentReqRemoved);
1194}
1195
1196void
1197SMMUTranslationProcess::issuePrefetch(Addr addr)
1198{
1199 if (!smmu.system.isTimingMode())
1200 return;
1201
1202 if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
1203 return;
1204
1205 std::string proc_name = csprintf("%sprf", name());
1206 SMMUTranslationProcess *proc =
1207 new SMMUTranslationProcess(proc_name, smmu, ifc);
1208
1209 proc->beginTransaction(
1210 SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
1211 proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1212}
1213
// Finish a successful translation: model master-port occupancy for the
// transfer, return any write-buffer slots the request reserved, then
// forward the translated packet downstream and (for non-ATS requests)
// send the response back to the originating device.
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    // Beats the payload occupies on the master port; reads are modelled
    // as a single beat here.
    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    // Hold the master port for the duration of the transfer.
    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    // Return the write-buffer slots reserved when the write arrived.
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        // ATS translation requests get a translation response instead
        // of being forwarded to memory.
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    // Retarget the packet at the translated physical address.
    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        // The downstream response comes back through the yield; restore
        // the original (pre-translation) address before responding to
        // the device.
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}
1274
1275void
1276SMMUTranslationProcess::completePrefetch(Yield &yield)
1277{
1278 SMMUAction a;
1279 a.type = ACTION_TERMINATE;
1280 a.pkt = NULL;
1281 a.ifc = &ifc;
1282 a.delay = 0;
1283 yield(a);
1284}
1285
// Append an event record to the memory-resident event queue and raise
// the event-queue MSI. Panics if the queue is full or the MSI is not
// enabled, since the model has no other way to report the event.
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    // Full when advancing the producer would catch up with the consumer.
    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
            "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
            event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
            ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    // Signal the event by writing the configured MSI payload to the
    // configured MSI address.
    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}
1315
// Fetch the Stream Table Entry for stream ID 'sid', handling both the
// linear and the two-level stream table formats selected by
// STRTAB_BASE_CFG. Panics on out-of-range SIDs, invalid level 1
// descriptors and invalid STEs.
void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        // Two-level table: the upper SID bits index the level 1 table,
        // the low 'split' bits index the level 2 table it points to.
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;

        // NOTE(review): this model accepts split values 7, 8 and 16 —
        // confirm against the SMMUv3 spec's valid SPLIT encodings.
        if (split!= 7 && split!=8 && split!=16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);

        // Span encodes the number of valid low SID bits covered by the
        // level 2 table; zero marks an invalid level 1 descriptor.
        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_LINEAR) {
        // Linear table, indexed directly by SID.
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
1382
1383void
1384SMMUTranslationProcess::doReadCD(Yield &yield,
1385 ContextDescriptor &cd,
1386 const StreamTableEntry &ste,
1387 uint32_t sid, uint32_t ssid)
1388{
1389 Addr cd_addr;
1390
1391 if (ste.dw0.s1cdmax == 0) {
1392 cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1393 } else {
1394 unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1395 if (ssid >= max_ssid)
1396 panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1397
1398 if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1399 ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1400 {
1401 unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1402
1403 uint64_t l2_ptr;
1404 uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1405 bits(ssid, 24, split) * sizeof(l2_ptr);
1406
1407 if (context.stage2Enable)
1408 l2_addr = translateStage2(yield, l2_addr, false).addr;
1409
1410 DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1411
1412 doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1413
1414 DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1415
1416 cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1417
1418 smmu.cdL1Fetches++;
1419 } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1420 cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1421 }
1422 }
1423
1424 if (context.stage2Enable)
1425 cd_addr = translateStage2(yield, cd_addr, false).addr;
1426
1427 DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1428
1429 doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1430
1431 DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1432 DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1433 DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1434 DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1435 DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1436 DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1437 DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1438 DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1439
1440
1441 if (!cd.dw0.valid)
1442 panic("CD @ %#x not valid\n", cd_addr);
1443
1444 smmu.cdFetches++;
1445}
1446
// Read configuration data (STE, CD or their level 1 descriptors) from
// memory. The sid/ssid arguments are currently unused — presumably kept
// in the signature so a configuration cache keyed by stream IDs can be
// hooked in here later (TODO confirm).
void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}
1454
1455void
1456SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
1457 void *ptr, unsigned stage,
1458 unsigned level)
1459{
1460 size_t pte_size = sizeof(PageTableOps::pte_t);
1461
1462 Addr mask = pte_size - 1;
1463 Addr base = addr & ~mask;
1464
1465 doRead(yield, base, ptr, pte_size);
1466}