// smmu_v3_transl.cc, revision 14223:ae17e22dcae5
/*
 * Copyright (c) 2013, 2018-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stan Czerniawski
 */

#include "dev/arm/smmu_v3_transl.hh"

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"

SMMUTranslRequest
SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
{
    SMMUTranslRequest req;
    req.addr = pkt->getAddr();
    req.size = pkt->getSize();
    req.sid = pkt->req->streamId();
    req.ssid = pkt->req->hasSubstreamId() ?
        pkt->req->substreamId() : 0;
    req.isWrite = pkt->isWrite();
    req.isPrefetch = false;
    req.isAtsRequest = ats;
    req.pkt = pkt;

    return req;
}
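// Prefetch requests are generated internally by the model: they carry no
// packet and have zero size, and exist only to warm up the TLBs for the
// next sequential 4k page.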
SMMUTranslRequest
SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
{
    SMMUTranslRequest req;
    req.addr = addr;
    req.size = 0;
    req.sid = sid;
    req.ssid = ssid;
    req.isWrite = false;
    req.isPrefetch = true;
    req.isAtsRequest = false;
    req.pkt = NULL;

    return req;
}

SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
    SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
  :
    SMMUProcess(name, _smmu),
    ifc(_ifc)
{
    // Decrease number of pending translation slots on the slave interface
    assert(ifc.xlateSlotsRemaining > 0);
    ifc.xlateSlotsRemaining--;

    ifc.pendingMemAccesses++;
    reinit();
}

SMMUTranslationProcess::~SMMUTranslationProcess()
{
    // Decrease number of pending SMMU memory accesses on the slave
    // interface
    assert(ifc.pendingMemAccesses > 0);
    ifc.pendingMemAccesses--;

    // If no more SMMU memory accesses are pending,
    // signal SMMU Slave Interface as drained
    if (ifc.pendingMemAccesses == 0) {
        ifc.signalDrainDone();
    }
}

void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    request = req;

    reinit();
}

void
SMMUTranslationProcess::resumeTransaction()
{
    assert(smmu.system.isTimingMode());

    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}
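// Top-level coroutine for a single translation. The process yields
// SMMUActions back to the SMMU model whenever it needs to interact with
// the outside world, and uses semaphores and delays to model contention
// and latency. Lookup order for a normal (non-prefetch) request is:
// micro TLB -> slave interface TLB -> SMMU main TLB -> page table walk.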
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
              request.addr, request.size);


    unsigned numSlaveBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.slavePortSem);
    doDelay(yield, Cycles(numSlaveBeats));
    doSemaphoreUp(ifc.slavePortSem);


    recvTick = curTick();

    if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        // - there's already a transaction looking up the same 4k page, OR
        // - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            // - there was a TLB hit and the entry was prefetched, OR
            // - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        if (tr.fault != FAULT_NONE)
            panic("fault\n");

        completeTransaction(yield, tr);
    }
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::bypass(Addr addr) const
{
    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = addr;
    tr.addrMask = 0;
    tr.writable = 1;

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if (findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->SLAVE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}
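// The three TLB lookup helpers below share a common pattern: take the
// structure's semaphore to model its limited ports, pay the lookup
// latency, then release the semaphore before acting on the result.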
bool
SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!ifc.microTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.microTLBSem);
    doDelay(yield, ifc.microTLBLat);
    const SMMUTLB::Entry *e =
        ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.microTLBSem);

    if (!e) {
        DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
            request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
        "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
        request.addr, e->vaMask, request.sid, request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

bool
SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
                                     bool &wasPrefetched)
{
    if (!ifc.mainTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.mainTLBSem);
    doDelay(yield, ifc.mainTLBLat);
    const SMMUTLB::Entry *e =
        ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.mainTLBSem);

    if (!e) {
        DPRINTF(SMMUv3,
                "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
                request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
            "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
            "paddr=%#x\n", request.addr, e->vaMask, request.sid,
            request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;
    wasPrefetched = e->prefetched;

    return true;
}

bool
SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!smmu.tlbEnable)
        return false;

    doSemaphoreDown(yield, smmu.tlbSem);
    doDelay(yield, smmu.tlbLat);
    const ARMArchTLB::Entry *e =
        smmu.tlb.lookup(request.addr, context.asid, context.vmid);
    doSemaphoreUp(smmu.tlbSem);

    if (!e) {
        DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
                request.addr, context.asid, context.vmid);

        return false;
    }

    DPRINTF(SMMUv3,
            "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
            request.addr, e->vaMask, context.asid, context.vmid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

void
SMMUTranslationProcess::microTLBUpdate(Yield &yield,
                                       const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.microTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = false;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    doSemaphoreDown(yield, ifc.microTLBSem);

    DPRINTF(SMMUv3,
        "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
        e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);

    doSemaphoreUp(ifc.microTLBSem);
}
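// When prefetching is enabled and the last way is reserved for prefetches,
// prefetched entries are confined to that way so that demand traffic and
// speculative fills do not evict each other.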
void
SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
                                     const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.mainTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = request.isPrefetch;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
    if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
        alloc = request.isPrefetch ?
            SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;

    doSemaphoreDown(yield, ifc.mainTLBSem);

    DPRINTF(SMMUv3,
        "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
        "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.mainTLB->store(e, alloc);

    doSemaphoreUp(ifc.mainTLBSem);
}

void
SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
                                      const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!smmu.tlbEnable)
        return;

    ARMArchTLB::Entry e;
    e.valid = true;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.asid = context.asid;
    e.vmid = context.vmid;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;

    doSemaphoreDown(yield, smmu.tlbSem);

    DPRINTF(SMMUv3,
        "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
        e.va, e.vaMask, e.pa, e.asid, e.vmid);

    smmu.tlb.store(e);

    doSemaphoreUp(smmu.tlbSem);
}

bool
SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return false;

    doSemaphoreDown(yield, smmu.configSem);
    doDelay(yield, smmu.configLat);
    const ConfigCache::Entry *e =
        smmu.configCache.lookup(request.sid, request.ssid);
    doSemaphoreUp(smmu.configSem);

    if (!e) {
        DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
                request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
            request.sid, request.ssid, e->ttb0, e->asid);

    tc.stage1Enable = e->stage1_en;
    tc.stage2Enable = e->stage2_en;

    tc.ttb0 = e->ttb0;
    tc.ttb1 = e->ttb1;
    tc.asid = e->asid;
    tc.httb = e->httb;
    tc.vmid = e->vmid;

    tc.stage1TranslGranule = e->stage1_tg;
    tc.stage2TranslGranule = e->stage2_tg;

    tc.t0sz = e->t0sz;
    tc.s2t0sz = e->s2t0sz;

    return true;
}

void
SMMUTranslationProcess::configCacheUpdate(Yield &yield,
                                          const TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return;

    ConfigCache::Entry e;
    e.valid = true;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.stage1_en = tc.stage1Enable;
    e.stage2_en = tc.stage2Enable;
    e.ttb0 = tc.ttb0;
    e.ttb1 = tc.ttb1;
    e.asid = tc.asid;
    e.httb = tc.httb;
    e.vmid = tc.vmid;
    e.stage1_tg = tc.stage1TranslGranule;
    e.stage2_tg = tc.stage2TranslGranule;
    e.t0sz = tc.t0sz;
    e.s2t0sz = tc.s2t0sz;

    doSemaphoreDown(yield, smmu.configSem);

    DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);

    smmu.configCache.store(e);

    doSemaphoreUp(smmu.configSem);
}
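// On a config cache miss, walk the in-memory structures: fetch the Stream
// Table Entry for the StreamID and, if stage 1 is enabled, the Context
// Descriptor for the SubstreamID, and assemble a TranslContext from them.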
bool
SMMUTranslationProcess::findConfig(Yield &yield,
                                   TranslContext &tc,
                                   TranslResult &tr)
{
    tc.stage1Enable = false;
    tc.stage2Enable = false;

    StreamTableEntry ste;
    doReadSTE(yield, ste, request.sid);

    switch (ste.dw0.config) {
        case STE_CONFIG_BYPASS:
            break;

        case STE_CONFIG_STAGE1_ONLY:
            tc.stage1Enable = true;
            break;

        case STE_CONFIG_STAGE2_ONLY:
            tc.stage2Enable = true;
            break;

        case STE_CONFIG_STAGE1_AND_2:
            tc.stage1Enable = true;
            tc.stage2Enable = true;
            break;

        default:
            panic("Bad or unimplemented STE config %d\n",
                ste.dw0.config);
    }


    // Establish stage 2 context first since
    // Context Descriptors can be in IPA space.
    if (tc.stage2Enable) {
        tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
        tc.vmid = ste.dw2.s2vmid;
        tc.stage2TranslGranule = ste.dw2.s2tg;
        tc.s2t0sz = ste.dw2.s2t0sz;
    } else {
        tc.httb = 0xdeadbeef;
        tc.vmid = 0;
        tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
        tc.s2t0sz = 0;
    }


    // Now fetch stage 1 config.
    if (context.stage1Enable) {
        ContextDescriptor cd;
        doReadCD(yield, cd, ste, request.sid, request.ssid);

        tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
        tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
        tc.asid = cd.dw0.asid;
        tc.stage1TranslGranule = cd.dw0.tg0;
        tc.t0sz = cd.dw0.t0sz;
    } else {
        tc.ttb0 = 0xcafebabe;
        tc.ttb1 = 0xcafed00d;
        tc.asid = 0;
        tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
        tc.t0sz = 0;
    }

    return true;
}

void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    const char *indent = stage==2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}

void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        e.asid = stage==1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
                        "tpa=%#x leaf=%s (S%d, L%d)\n",
                e.stage==2 ? "  " : "",
                e.va, e.vaMask, e.asid, e.vmid,
                e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}
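// Stage 1 walk with optional nested stage 2: every table pointer read
// during the stage 1 walk is an IPA when stage 2 is enabled, so each one
// is run through translateStage2() before it is dereferenced.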
/*
 * Please note:
 * This does not deal with the case where stage 1 page size
 * is larger than stage 2 page size.
 */
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}
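// Stage 2-only walk. Walk cache entries are installed only for the final
// translation of a request, unless caching of non-final (nested) stage 2
// walks is explicitly enabled.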
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
                                   const PageTableOps *pt_ops,
                                   unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "  Fetching S2 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 2, level);

        DPRINTF(SMMUv3, "  Got S2 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "  S2 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, true))
        {
            DPRINTF(SMMUv3, "  S2 PTE not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (final_tr || smmu.walkCacheNonfinalEnable)
            walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                            2, level, leaf,
                            leaf ? pt_ops->isWritable(pte, level, true) : 0);

        if (leaf)
            break;
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, true);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
         level > pt_ops->firstLevel(context.t0sz);
         level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops,
                            pt_ops->firstLevel(context.t0sz),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}
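// Stage 2 translation with an IPA->PA cache in front of the stage 2 walk
// cache. Note that this is called both for final translations and for
// non-final ones (table pointers fetched during a stage 1 walk).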
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage2TranslGranule);

    const IPACache::Entry *ipa_ep = NULL;
    if (smmu.ipaCacheEnable) {
        doSemaphoreDown(yield, smmu.ipaSem);
        doDelay(yield, smmu.ipaLat);
        ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
        doSemaphoreUp(smmu.ipaSem);
    }

    if (ipa_ep) {
        TranslResult tr;
        tr.fault = FAULT_NONE;
        tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
        tr.addrMask = ipa_ep->ipaMask;
        tr.writable = ipa_ep->permissions;

        DPRINTF(SMMUv3, "  IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
                addr, context.vmid, tr.addr);

        return tr;
    } else if (smmu.ipaCacheEnable) {
        DPRINTF(SMMUv3, "  IPACache miss ipa=%#x vmid=%#x\n",
                addr, context.vmid);
    }

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level = pt_ops->firstLevel(context.s2t0sz);

    if (final_tr || smmu.walkCacheNonfinalEnable) {
        // Level here is actually (level+1) so we can count down
        // to 0 using unsigned int.
        for (level = pt_ops->lastLevel() + 1;
             level > pt_ops->firstLevel(context.s2t0sz);
             level--)
        {
            walkCacheLookup(yield, walk_ep, addr,
                            0, context.vmid, 2, level-1);

            if (walk_ep)
                break;
        }

        // Correct level (see above).
        level -= 1;
    }

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage2(yield, addr, final_tr, pt_ops,
                            level + 1, walk_ep->pa);
        }
    } else {
        tr = walkStage2(yield, addr, final_tr, pt_ops,
                        pt_ops->firstLevel(context.s2t0sz),
                        context.httb);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "  Translated %saddr %#x to paddr %#x\n",
            context.stage1Enable ? "ip" : "v", addr, tr.addr);

    if (smmu.ipaCacheEnable) {
        IPACache::Entry e;
        e.valid = true;
        e.ipaMask = tr.addrMask;
        e.ipa = addr & e.ipaMask;
        e.pa = tr.addr & tr.addrMask;
        e.permissions = tr.writable;
        e.vmid = context.vmid;

        doSemaphoreDown(yield, smmu.ipaSem);
        smmu.ipaCache.store(e);
        doSemaphoreUp(smmu.ipaSem);
    }

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
                                            const TranslResult &s2tr) const
{
    if (s2tr.fault != FAULT_NONE)
        return s2tr;

    assert(s1tr.fault == FAULT_NONE);

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = s2tr.addr;
    tr.addrMask = s1tr.addrMask | s2tr.addrMask;
    tr.writable = s1tr.writable & s2tr.writable;

    return tr;
}
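// 4k-page hazard tracking. Concurrent transactions that touch the same
// 4k page are serialized: later arrivals wait (hazard4kHold) until the
// earlier one releases the hazard, then retry the interface TLB so they
// can reuse the entry it just installed instead of walking again.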
bool
SMMUTranslationProcess::hazard4kCheck()
{
    Addr addr4k = request.addr & ~0xfffULL;

    for (auto it = ifc.duplicateReqs.begin();
         it != ifc.duplicateReqs.end();
         ++it)
    {
        Addr other4k = (*it)->request.addr & ~0xfffULL;
        if (addr4k == other4k)
            return true;
    }

    return false;
}

void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}

void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = ifc.duplicateReqs.begin();
             it!=ifc.duplicateReqs.end() && *it!=this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazard4kRelease()
{
    DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
        if (*it == this)
            break;

    if (it == ifc.duplicateReqs.end())
        panic("hazard4kRelease: request not found");

    ifc.duplicateReqs.erase(it);

    doBroadcastSignal(ifc.duplicateReqRemoved);
}

void
SMMUTranslationProcess::hazardIdRegister()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);

    assert(orderId < SMMU_MAX_TRANS_ID);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    depReqs.push_back(this);
}

void
SMMUTranslationProcess::hazardIdHold(Yield &yield)
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
            DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
                    this, orderId, *it);

            if (AMBA::orderId((*it)->request.pkt) == orderId) {
                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
                        this, orderId, *it);

                doWaitForSignal(yield, ifc.dependentReqRemoved);

                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
                        this, orderId);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazardIdRelease()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
        if (*it == this)
            break;
    }

    if (it == depReqs.end())
        panic("hazardIdRelease: request not found");

    depReqs.erase(it);

    doBroadcastSignal(ifc.dependentReqRemoved);
}
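// Speculatively translate the next sequential 4k page. Only done in
// timing mode, and only if a translation slot is free; the new process
// competes for resources like any demand request.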
void
SMMUTranslationProcess::issuePrefetch(Addr addr)
{
    if (!smmu.system.isTimingMode())
        return;

    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
        return;

    std::string proc_name = csprintf("%sprf", name());
    SMMUTranslationProcess *proc =
        new SMMUTranslationProcess(proc_name, smmu, ifc);

    proc->beginTransaction(
        SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
}

void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    ifc.xlateSlotsRemaining++;
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}

void
SMMUTranslationProcess::completePrefetch(Yield &yield)
{
    ifc.xlateSlotsRemaining++;

    SMMUAction a;
    a.type = ACTION_TERMINATE;
    a.pkt = NULL;
    a.ifc = &ifc;
    a.delay = 0;
    yield(a);
}
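// Write an event record into the in-memory event queue and signal it via
// MSI. The model panics if the queue is full or the event queue MSI is
// not configured, rather than modelling queue overflow or wired IRQs.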
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);

    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
        "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
        event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
        ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}

void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK)
                >> ST_CFG_SPLIT_SHIFT;

        if (split != 7 && split != 8 && split != 16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE at %#x: 0x%016x\n", l2_addr, l2_ptr);

        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) ==
               ST_CFG_FMT_LINEAR) {
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
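// Fetch the Context Descriptor selected by the SubstreamID. Depending on
// STE.S1Fmt this is a single linear table or a two-level table (4k or 64k
// leaf tables), and every pointer is translated through stage 2 when it
// is enabled, since CDs live in IPA space in that case.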
void
SMMUTranslationProcess::doReadCD(Yield &yield,
                                 ContextDescriptor &cd,
                                 const StreamTableEntry &ste,
                                 uint32_t sid, uint32_t ssid)
{
    Addr cd_addr;

    if (ste.dw0.s1cdmax == 0) {
        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
    } else {
        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
        if (ssid >= max_ssid)
            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);

        if (ste.dw0.s1fmt == STAGE1_CFG_2L_4K ||
            ste.dw0.s1fmt == STAGE1_CFG_2L_64K)
        {
            unsigned split = ste.dw0.s1fmt == STAGE1_CFG_2L_4K ? 7 : 11;

            uint64_t l2_ptr;
            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                bits(ssid, 24, split) * sizeof(l2_ptr);

            if (context.stage2Enable)
                l2_addr = translateStage2(yield, l2_addr, false).addr;

            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);

            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);

            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);

            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);

            smmu.cdL1Fetches++;
        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                ssid * sizeof(cd);
        } else {
            // Guard against an uninitialized cd_addr on unknown formats
            panic("Unknown STE S1Fmt %d\n", ste.dw0.s1fmt);
        }
    }

    if (context.stage2Enable)
        cd_addr = translateStage2(yield, cd_addr, false).addr;

    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);

    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);

    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);


    if (!cd.dw0.valid)
        panic("CD @ %#x not valid\n", cd_addr);

    smmu.cdFetches++;
}

void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}

void
SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
                                  void *ptr, unsigned stage,
                                  unsigned level)
{
    size_t pte_size = sizeof(PageTableOps::pte_t);

    Addr mask = pte_size - 1;
    Addr base = addr & ~mask;

    doRead(yield, base, ptr, pte_size);
}