smmu_v3_transl.cc revision 14063:fc05dc40f6d1
/*
 * Copyright (c) 2013, 2018-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stan Czerniawski
 */

#include "dev/arm/smmu_v3_transl.hh"

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"

SMMUTranslRequest
SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)
{
    SMMUTranslRequest req;
    req.addr = pkt->getAddr();
    req.size = pkt->getSize();
    req.sid = pkt->req->streamId();
    req.ssid = pkt->req->hasSubstreamId() ?
        pkt->req->substreamId() : 0;
    req.isWrite = pkt->isWrite();
    req.isPrefetch = false;
    req.isAtsRequest = ats;
    req.pkt = pkt;

    return req;
}

SMMUTranslRequest
SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
{
    SMMUTranslRequest req;
    req.addr = addr;
    req.size = 0;
    req.sid = sid;
    req.ssid = ssid;
    req.isWrite = false;
    req.isPrefetch = true;
    req.isAtsRequest = false;
    req.pkt = NULL;

    return req;
}

SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
    SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
  :
    SMMUProcess(name, _smmu),
    ifc(_ifc)
{
    // Decrease number of pending translation slots on the slave interface
    assert(ifc.xlateSlotsRemaining > 0);
    ifc.xlateSlotsRemaining--;
    reinit();
}

SMMUTranslationProcess::~SMMUTranslationProcess()
{
    // Increase number of pending translation slots on the slave interface
    ifc.xlateSlotsRemaining++;
}

void
SMMUTranslationProcess::beginTransaction(const SMMUTranslRequest &req)
{
    request = req;

    reinit();
}

void
SMMUTranslationProcess::resumeTransaction()
{
    assert(smmu.system.isTimingMode());

    assert(!"Stalls are broken");

    Tick resumeTick = curTick();

    (void) resumeTick;
    DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
        resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);

    beginTransaction(request);

    smmu.runProcessTiming(this, request.pkt);
}
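// Worked example for the slave-port beat accounting in main() below,
// assuming an 8-byte (64-bit) slave port, i.e. ifc.portWidth == 8
// (illustrative value, not fixed by this file): a 32-byte write
// occupies ceil(32/8) = (32 + 7) / 8 = 4 beats, while a read always
// occupies a single request beat.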
void
SMMUTranslationProcess::main(Yield &yield)
{
    // Hack:
    // The coroutine starts running as soon as it's created.
    // But we need to wait for request data esp. in atomic mode.
    SMMUAction a;
    a.type = ACTION_INITIAL_NOP;
    a.pkt = NULL;
    yield(a);

    const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;

    if ((request.addr + request.size) > next4k)
        panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
              request.addr, request.size);


    unsigned numSlaveBeats = request.isWrite ?
        (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;

    doSemaphoreDown(yield, ifc.slavePortSem);
    doDelay(yield, Cycles(numSlaveBeats));
    doSemaphoreUp(ifc.slavePortSem);


    recvTick = curTick();


    if (!(smmu.regs.cr0 & 0x1)) {
        // SMMU disabled
        doDelay(yield, Cycles(1));
        completeTransaction(yield, bypass(request.addr));
        return;
    }

    TranslResult tr;
    bool wasPrefetched = false;

    if (request.isPrefetch) {
        // Abort prefetch if:
        //   - there's already a transaction looking up the same 4k page, OR
        //   - requested address is already in the TLB.
        if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
            completePrefetch(yield); // this never returns

        hazard4kRegister();

        tr = smmuTranslation(yield);

        if (tr.fault == FAULT_NONE)
            ifcTLBUpdate(yield, tr);

        hazard4kRelease();

        completePrefetch(yield);
    } else {
        hazardIdRegister();

        if (!microTLBLookup(yield, tr)) {
            bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
            if (!hit) {
                while (!hit && hazard4kCheck()) {
                    hazard4kHold(yield);
                    hit = ifcTLBLookup(yield, tr, wasPrefetched);
                }
            }

            // Issue prefetch if:
            //   - there was a TLB hit and the entry was prefetched, OR
            //   - TLB miss was successfully serviced
            if (hit) {
                if (wasPrefetched)
                    issuePrefetch(next4k);
            } else {
                hazard4kRegister();

                tr = smmuTranslation(yield);

                if (tr.fault == FAULT_NONE) {
                    ifcTLBUpdate(yield, tr);

                    issuePrefetch(next4k);
                }

                hazard4kRelease();
            }

            if (tr.fault == FAULT_NONE)
                microTLBUpdate(yield, tr);
        }

        hazardIdHold(yield);
        hazardIdRelease();

        if (tr.fault != FAULT_NONE)
            panic("fault\n");

        completeTransaction(yield, tr);
    }
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::bypass(Addr addr) const
{
    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = addr;
    tr.addrMask = 0;
    tr.writable = 1;

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::smmuTranslation(Yield &yield)
{
    TranslResult tr;

    // Need SMMU credit to proceed
    doSemaphoreDown(yield, smmu.transSem);

    // Simulate pipelined IFC->SMMU link
    doSemaphoreDown(yield, smmu.ifcSmmuSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.ifcSmmuSem);
    doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay

    bool haveConfig = true;
    if (!configCacheLookup(yield, context)) {
        if (findConfig(yield, context, tr)) {
            configCacheUpdate(yield, context);
        } else {
            haveConfig = false;
        }
    }

    if (haveConfig && !smmuTLBLookup(yield, tr)) {
        // SMMU main TLB miss

        // Need PTW slot to proceed
        doSemaphoreDown(yield, smmu.ptwSem);

        // Page table walk
        Tick ptwStartTick = curTick();

        if (context.stage1Enable) {
            tr = translateStage1And2(yield, request.addr);
        } else if (context.stage2Enable) {
            tr = translateStage2(yield, request.addr, true);
        } else {
            tr = bypass(request.addr);
        }

        if (context.stage1Enable || context.stage2Enable)
            smmu.ptwTimeDist.sample(curTick() - ptwStartTick);

        // Free PTW slot
        doSemaphoreUp(smmu.ptwSem);

        if (tr.fault == FAULT_NONE)
            smmuTLBUpdate(yield, tr);
    }

    // Simulate pipelined SMMU->SLAVE INTERFACE link
    doSemaphoreDown(yield, smmu.smmuIfcSem);
    doDelay(yield, Cycles(1)); // serialize transactions
    doSemaphoreUp(smmu.smmuIfcSem);
    doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay

    // return SMMU credit
    doSemaphoreUp(smmu.transSem);

    return tr;
}
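// Address formation on a TLB hit, as used by the three lookup helpers
// below: vaMask has 1s above the page-offset bits, so
//     tr.addr = e->pa + (va & ~e->vaMask)
// splices the page offset of the request onto the cached physical page.
// E.g. for a 4 KiB entry (vaMask = ~0xfff), e->pa = 0x80000000 and
// va = 0x7fff1234 give tr.addr = 0x80000234 (illustrative numbers only).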
bool
SMMUTranslationProcess::microTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!ifc.microTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.microTLBSem);
    doDelay(yield, ifc.microTLBLat);
    const SMMUTLB::Entry *e =
        ifc.microTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.microTLBSem);

    if (!e) {
        DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
            request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
        "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
        request.addr, e->vaMask, request.sid, request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

bool
SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr,
                                     bool &wasPrefetched)
{
    if (!ifc.mainTLBEnable)
        return false;

    doSemaphoreDown(yield, ifc.mainTLBSem);
    doDelay(yield, ifc.mainTLBLat);
    const SMMUTLB::Entry *e =
        ifc.mainTLB->lookup(request.sid, request.ssid, request.addr);
    doSemaphoreUp(ifc.mainTLBSem);

    if (!e) {
        DPRINTF(SMMUv3,
                "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
                request.addr, request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3,
            "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
            "paddr=%#x\n", request.addr, e->vaMask, request.sid,
            request.ssid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;
    wasPrefetched = e->prefetched;

    return true;
}

bool
SMMUTranslationProcess::smmuTLBLookup(Yield &yield, TranslResult &tr)
{
    if (!smmu.tlbEnable)
        return false;

    doSemaphoreDown(yield, smmu.tlbSem);
    doDelay(yield, smmu.tlbLat);
    const ARMArchTLB::Entry *e =
        smmu.tlb.lookup(request.addr, context.asid, context.vmid);
    doSemaphoreUp(smmu.tlbSem);

    if (!e) {
        DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
            request.addr, context.asid, context.vmid);

        return false;
    }

    DPRINTF(SMMUv3,
        "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
        request.addr, e->vaMask, context.asid, context.vmid, e->pa);

    tr.fault = FAULT_NONE;
    tr.addr = e->pa + (request.addr & ~e->vaMask);
    tr.addrMask = e->vaMask;
    tr.writable = e->permissions;

    return true;
}

void
SMMUTranslationProcess::microTLBUpdate(Yield &yield,
                                       const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.microTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = false;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    doSemaphoreDown(yield, ifc.microTLBSem);

    DPRINTF(SMMUv3,
        "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
        e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.microTLB->store(e, SMMUTLB::ALLOC_ANY_WAY);

    doSemaphoreUp(ifc.microTLBSem);
}
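// Way-allocation note for ifcTLBUpdate() below: when prefetching is
// enabled and prefetchReserveLastWay is set, prefetched entries are
// confined to the last way (ALLOC_LAST_WAY) and demand entries to the
// remaining ways (ALLOC_ANY_BUT_LAST_WAY), so a prefetch stream cannot
// evict the whole demand working set.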
void
SMMUTranslationProcess::ifcTLBUpdate(Yield &yield,
                                     const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!ifc.mainTLBEnable)
        return;

    SMMUTLB::Entry e;
    e.valid = true;
    e.prefetched = request.isPrefetch;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;
    e.asid = context.asid;
    e.vmid = context.vmid;

    SMMUTLB::AllocPolicy alloc = SMMUTLB::ALLOC_ANY_WAY;
    if (ifc.prefetchEnable && ifc.prefetchReserveLastWay)
        alloc = request.isPrefetch ?
            SMMUTLB::ALLOC_LAST_WAY : SMMUTLB::ALLOC_ANY_BUT_LAST_WAY;

    doSemaphoreDown(yield, ifc.mainTLBSem);

    DPRINTF(SMMUv3,
        "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
        "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);

    ifc.mainTLB->store(e, alloc);

    doSemaphoreUp(ifc.mainTLBSem);
}

void
SMMUTranslationProcess::smmuTLBUpdate(Yield &yield,
                                      const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    if (!smmu.tlbEnable)
        return;

    ARMArchTLB::Entry e;
    e.valid = true;
    e.vaMask = tr.addrMask;
    e.va = request.addr & e.vaMask;
    e.asid = context.asid;
    e.vmid = context.vmid;
    e.pa = tr.addr & e.vaMask;
    e.permissions = tr.writable;

    doSemaphoreDown(yield, smmu.tlbSem);

    DPRINTF(SMMUv3,
        "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
        e.va, e.vaMask, e.pa, e.asid, e.vmid);

    smmu.tlb.store(e);

    doSemaphoreUp(smmu.tlbSem);
}

bool
SMMUTranslationProcess::configCacheLookup(Yield &yield, TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return false;

    doSemaphoreDown(yield, smmu.configSem);
    doDelay(yield, smmu.configLat);
    const ConfigCache::Entry *e =
        smmu.configCache.lookup(request.sid, request.ssid);
    doSemaphoreUp(smmu.configSem);

    if (!e) {
        DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
            request.sid, request.ssid);

        return false;
    }

    DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
        request.sid, request.ssid, e->ttb0, e->asid);

    tc.stage1Enable = e->stage1_en;
    tc.stage2Enable = e->stage2_en;

    tc.ttb0 = e->ttb0;
    tc.ttb1 = e->ttb1;
    tc.asid = e->asid;
    tc.httb = e->httb;
    tc.vmid = e->vmid;

    tc.stage1TranslGranule = e->stage1_tg;
    tc.stage2TranslGranule = e->stage2_tg;

    return true;
}

void
SMMUTranslationProcess::configCacheUpdate(Yield &yield,
                                          const TranslContext &tc)
{
    if (!smmu.configCacheEnable)
        return;

    ConfigCache::Entry e;
    e.valid = true;
    e.sid = request.sid;
    e.ssid = request.ssid;
    e.stage1_en = tc.stage1Enable;
    e.stage2_en = tc.stage2Enable;
    e.ttb0 = tc.ttb0;
    e.ttb1 = tc.ttb1;
    e.asid = tc.asid;
    e.httb = tc.httb;
    e.vmid = tc.vmid;
    e.stage1_tg = tc.stage1TranslGranule;
    e.stage2_tg = tc.stage2TranslGranule;

    doSemaphoreDown(yield, smmu.configSem);

    DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);

    smmu.configCache.store(e);

    doSemaphoreUp(smmu.configSem);
}
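// STE.Config decode implemented by findConfig() below:
//   STE_CONFIG_BYPASS       - no translation, output address = input address
//   STE_CONFIG_STAGE1_ONLY  - VA -> PA through CD-controlled tables
//   STE_CONFIG_STAGE2_ONLY  - IPA -> PA through STE-controlled tables
//   STE_CONFIG_STAGE1_AND_2 - VA -> IPA -> PA; stage 1 table walks are
//                             themselves stage-2 translated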
bool
SMMUTranslationProcess::findConfig(Yield &yield,
                                   TranslContext &tc,
                                   TranslResult &tr)
{
    tc.stage1Enable = false;
    tc.stage2Enable = false;

    StreamTableEntry ste;
    doReadSTE(yield, ste, request.sid);

    switch (ste.dw0.config) {
    case STE_CONFIG_BYPASS:
        break;

    case STE_CONFIG_STAGE1_ONLY:
        tc.stage1Enable = true;
        break;

    case STE_CONFIG_STAGE2_ONLY:
        tc.stage2Enable = true;
        break;

    case STE_CONFIG_STAGE1_AND_2:
        tc.stage1Enable = true;
        tc.stage2Enable = true;
        break;

    default:
        panic("Bad or unimplemented STE config %d\n",
            ste.dw0.config);
    }


    // Establish stage 2 context first since
    // Context Descriptors can be in IPA space.
    if (tc.stage2Enable) {
        tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
        tc.vmid = ste.dw2.s2vmid;
        tc.stage2TranslGranule = ste.dw2.s2tg;
    } else {
        tc.httb = 0xdeadbeef;
        tc.vmid = 0;
        tc.stage2TranslGranule = TRANS_GRANULE_INVALID;
    }


    // Now fetch stage 1 config.
    if (tc.stage1Enable) {
        ContextDescriptor cd;
        doReadCD(yield, cd, ste, request.sid, request.ssid);

        tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
        tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
        tc.asid = cd.dw0.asid;
        tc.stage1TranslGranule = cd.dw0.tg0;
    } else {
        tc.ttb0 = 0xcafebabe;
        tc.ttb1 = 0xcafed00d;
        tc.asid = 0;
        tc.stage1TranslGranule = TRANS_GRANULE_INVALID;
    }

    return true;
}

void
SMMUTranslationProcess::walkCacheLookup(
        Yield &yield,
        const WalkCache::Entry *&walkEntry,
        Addr addr, uint16_t asid, uint16_t vmid,
        unsigned stage, unsigned level)
{
    const char *indent = stage==2 ? "  " : "";
    (void) indent; // this is only used in DPRINTFs

    const PageTableOps *pt_ops =
        stage == 1 ?
            smmu.getPageTableOps(context.stage1TranslGranule) :
            smmu.getPageTableOps(context.stage2TranslGranule);

    unsigned walkCacheLevels =
        smmu.walkCacheEnable ?
            (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
            0;

    if ((1 << level) & walkCacheLevels) {
        doSemaphoreDown(yield, smmu.walkSem);
        doDelay(yield, smmu.walkLat);

        walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
                                          asid, vmid, stage, level);

        if (walkEntry) {
            DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
                            "base=%#x (S%d, L%d)\n",
                    indent, addr, asid, vmid, walkEntry->pa, stage, level);
        } else {
            DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
                            "(S%d, L%d)\n",
                    indent, addr, asid, vmid, stage, level);
        }

        doSemaphoreUp(smmu.walkSem);
    }
}

void
SMMUTranslationProcess::walkCacheUpdate(Yield &yield, Addr va,
                                        Addr vaMask, Addr pa,
                                        unsigned stage, unsigned level,
                                        bool leaf, uint8_t permissions)
{
    unsigned walkCacheLevels =
        stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels;

    if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
        WalkCache::Entry e;
        e.valid = true;
        e.va = va;
        e.vaMask = vaMask;
        e.asid = stage==1 ? context.asid : 0;
        e.vmid = context.vmid;
        e.stage = stage;
        e.level = level;
        e.leaf = leaf;
        e.pa = pa;
        e.permissions = permissions;

        doSemaphoreDown(yield, smmu.walkSem);

        DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
                        "tpa=%#x leaf=%s (S%d, L%d)\n",
                e.stage==2 ? "  " : "",
                e.va, e.vaMask, e.asid, e.vmid,
                e.pa, e.leaf, e.stage, e.level);

        smmu.walkCache.store(e);

        doSemaphoreUp(smmu.walkSem);
    }
}
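// Walk-cache level gating used above: level L of a stage-S walk is
// cached iff bit L is set in walkCacheS<S>Levels. For example (value
// illustrative, set by configuration), walkCacheS1Levels = 0x6 (0b0110)
// caches stage-1 levels 1 and 2 only, since (1 << 1) and (1 << 2) are
// the set bits.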
/*
 * Please note:
 * This does not deal with the case where stage 1 page size
 * is larger than stage 2 page size.
 */
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage1And2(Yield &yield, Addr addr,
                                       const PageTableOps *pt_ops,
                                       unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 1, level);

        DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, false))
        {
            DPRINTF(SMMUv3, "S1 page not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (leaf)
            break;

        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, walkPtr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            walkPtr = s2tr.addr;
        }

        walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                        1, level, leaf, 0);
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, false);

    if (context.stage2Enable) {
        TranslResult s2tr = translateStage2(yield, tr.addr, true);
        if (s2tr.fault != FAULT_NONE)
            return s2tr;

        tr = combineTranslations(tr, s2tr);
    }

    walkCacheUpdate(yield, addr, tr.addrMask, tr.addr,
                    1, level, true, tr.writable);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::walkStage2(Yield &yield, Addr addr, bool final_tr,
                                   const PageTableOps *pt_ops,
                                   unsigned level, Addr walkPtr)
{
    PageTableOps::pte_t pte = 0;

    doSemaphoreDown(yield, smmu.cycleSem);
    doDelay(yield, Cycles(1));
    doSemaphoreUp(smmu.cycleSem);

    for (; level <= pt_ops->lastLevel(); level++) {
        Addr pte_addr = walkPtr + pt_ops->index(addr, level);

        DPRINTF(SMMUv3, "  Fetching S2 L%d PTE from pa=%#08x\n",
                level, pte_addr);

        doReadPTE(yield, addr, pte_addr, &pte, 2, level);

        DPRINTF(SMMUv3, "  Got S2 L%d PTE=%#x from pa=%#08x\n",
                level, pte, pte_addr);

        doSemaphoreDown(yield, smmu.cycleSem);
        doDelay(yield, Cycles(1));
        doSemaphoreUp(smmu.cycleSem);

        bool valid = pt_ops->isValid(pte, level);
        bool leaf = pt_ops->isLeaf(pte, level);

        if (!valid) {
            DPRINTF(SMMUv3, "  S2 PTE not valid - fault\n");

            TranslResult tr;
            tr.fault = FAULT_TRANSLATION;
            return tr;
        }

        if (valid && leaf && request.isWrite &&
            !pt_ops->isWritable(pte, level, true))
        {
            DPRINTF(SMMUv3, "  S2 PTE not writable - fault\n");

            TranslResult tr;
            tr.fault = FAULT_PERMISSION;
            return tr;
        }

        walkPtr = pt_ops->nextLevelPointer(pte, level);

        if (final_tr || smmu.walkCacheNonfinalEnable)
            walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
                            2, level, leaf,
                            leaf ? pt_ops->isWritable(pte, level, true) : 0);

        if (leaf)
            break;
    }

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addrMask = pt_ops->pageMask(pte, level);
    tr.addr = walkPtr + (addr & ~tr.addrMask);
    tr.writable = pt_ops->isWritable(pte, level, true);

    return tr;
}
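// Both translate functions below probe the walk cache from the deepest
// level upwards, using an off-by-one loop counter so an unsigned int
// can count down through level 0. E.g. with firstLevel() = 0 and
// lastLevel() = 3, the loop variable runs 4,3,2,1 and probes levels
// 3,2,1,0; "level -= 1" then converts it back to a real level.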
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage1And2(Yield &yield, Addr addr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage1TranslGranule);

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level;

    // Level here is actually (level+1) so we can count down
    // to 0 using unsigned int.
    for (level = pt_ops->lastLevel() + 1;
        level > pt_ops->firstLevel();
        level--)
    {
        walkCacheLookup(yield, walk_ep, addr,
                        context.asid, context.vmid, 1, level-1);

        if (walk_ep)
            break;
    }

    // Correct level (see above).
    level -= 1;

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
        }
    } else {
        Addr table_addr = context.ttb0;
        if (context.stage2Enable) {
            TranslResult s2tr = translateStage2(yield, table_addr, false);
            if (s2tr.fault != FAULT_NONE)
                return s2tr;

            table_addr = s2tr.addr;
        }

        tr = walkStage1And2(yield, addr, pt_ops, pt_ops->firstLevel(),
                            table_addr);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);

    return tr;
}

SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::translateStage2(Yield &yield, Addr addr, bool final_tr)
{
    const PageTableOps *pt_ops =
        smmu.getPageTableOps(context.stage2TranslGranule);

    const IPACache::Entry *ipa_ep = NULL;
    if (smmu.ipaCacheEnable) {
        doSemaphoreDown(yield, smmu.ipaSem);
        doDelay(yield, smmu.ipaLat);
        ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
        doSemaphoreUp(smmu.ipaSem);
    }

    if (ipa_ep) {
        TranslResult tr;
        tr.fault = FAULT_NONE;
        tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
        tr.addrMask = ipa_ep->ipaMask;
        tr.writable = ipa_ep->permissions;

        DPRINTF(SMMUv3, "  IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
            addr, context.vmid, tr.addr);

        return tr;
    } else if (smmu.ipaCacheEnable) {
        DPRINTF(SMMUv3, "  IPACache miss ipa=%#x vmid=%#x\n",
            addr, context.vmid);
    }

    const WalkCache::Entry *walk_ep = NULL;
    unsigned level = pt_ops->firstLevel();

    if (final_tr || smmu.walkCacheNonfinalEnable) {
        // Level here is actually (level+1) so we can count down
        // to 0 using unsigned int.
        for (level = pt_ops->lastLevel() + 1;
            level > pt_ops->firstLevel();
            level--)
        {
            walkCacheLookup(yield, walk_ep, addr,
                            0, context.vmid, 2, level-1);

            if (walk_ep)
                break;
        }

        // Correct level (see above).
        level -= 1;
    }

    TranslResult tr;
    if (walk_ep) {
        if (walk_ep->leaf) {
            tr.fault = FAULT_NONE;
            tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
            tr.addrMask = walk_ep->vaMask;
            tr.writable = walk_ep->permissions;
        } else {
            tr = walkStage2(yield, addr, final_tr, pt_ops,
                            level + 1, walk_ep->pa);
        }
    } else {
        tr = walkStage2(yield, addr, final_tr, pt_ops, pt_ops->firstLevel(),
                        context.httb);
    }

    if (tr.fault == FAULT_NONE)
        DPRINTF(SMMUv3, "  Translated %saddr %#x to paddr %#x\n",
            context.stage1Enable ? "ip" : "v", addr, tr.addr);

    if (smmu.ipaCacheEnable) {
        IPACache::Entry e;
        e.valid = true;
        e.ipaMask = tr.addrMask;
        e.ipa = addr & e.ipaMask;
        e.pa = tr.addr & tr.addrMask;
        e.permissions = tr.writable;
        e.vmid = context.vmid;

        doSemaphoreDown(yield, smmu.ipaSem);
        smmu.ipaCache.store(e);
        doSemaphoreUp(smmu.ipaSem);
    }

    return tr;
}
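// combineTranslations() below merges a stage-1 and a stage-2 result.
// The output region is the intersection of the two mappings, so the
// address masks are OR-ed: e.g. a 2 MiB stage-1 block (mask ~0x1fffff)
// backed by 4 KiB stage-2 pages (mask ~0xfff) yields a 4 KiB result,
// since ~0x1fffff | ~0xfff == ~0xfff. Writability is the AND of the
// two stages.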
SMMUTranslationProcess::TranslResult
SMMUTranslationProcess::combineTranslations(const TranslResult &s1tr,
                                            const TranslResult &s2tr) const
{
    if (s2tr.fault != FAULT_NONE)
        return s2tr;

    assert(s1tr.fault == FAULT_NONE);

    TranslResult tr;
    tr.fault = FAULT_NONE;
    tr.addr = s2tr.addr;
    tr.addrMask = s1tr.addrMask | s2tr.addrMask;
    tr.writable = s1tr.writable & s2tr.writable;

    return tr;
}

bool
SMMUTranslationProcess::hazard4kCheck()
{
    Addr addr4k = request.addr & ~0xfffULL;

    for (auto it = ifc.duplicateReqs.begin();
         it != ifc.duplicateReqs.end();
         ++it)
    {
        Addr other4k = (*it)->request.addr & ~0xfffULL;
        if (addr4k == other4k)
            return true;
    }

    return false;
}

void
SMMUTranslationProcess::hazard4kRegister()
{
    DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    ifc.duplicateReqs.push_back(this);
}

void
SMMUTranslationProcess::hazard4kHold(Yield &yield)
{
    Addr addr4k = request.addr & ~0xfffULL;

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = ifc.duplicateReqs.begin();
             it != ifc.duplicateReqs.end() && *it != this;
             ++it)
        {
            Addr other4k = (*it)->request.addr & ~0xfffULL;

            DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
                    this, addr4k, *it, other4k);

            if (addr4k == other4k) {
                DPRINTF(SMMUv3Hazard,
                        "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
                        this, addr4k, *it, other4k);

                doWaitForSignal(yield, ifc.duplicateReqRemoved);

                DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
                        this, addr4k);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}
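// hazard4kRelease() below removes this process from duplicateReqs and
// broadcasts duplicateReqRemoved; every process parked in
// hazard4kHold() then wakes and re-scans the list from the start,
// which is why hold loops until a full scan finds no conflicting
// request queued ahead of it.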
void
SMMUTranslationProcess::hazard4kRelease()
{
    DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
            this, request.addr & ~0xfffULL);

    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
        if (*it == this)
            break;

    if (it == ifc.duplicateReqs.end())
        panic("hazard4kRelease: request not found");

    ifc.duplicateReqs.erase(it);

    doBroadcastSignal(ifc.duplicateReqRemoved);
}

void
SMMUTranslationProcess::hazardIdRegister()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);

    assert(orderId < SMMU_MAX_TRANS_ID);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    depReqs.push_back(this);
}

void
SMMUTranslationProcess::hazardIdHold(Yield &yield)
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];

    bool found_hazard;

    do {
        found_hazard = false;

        for (auto it = depReqs.begin(); it != depReqs.end() && *it != this;
             ++it)
        {
            DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
                    this, orderId, *it);

            if (AMBA::orderId((*it)->request.pkt) == orderId) {
                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
                        this, orderId, *it);

                doWaitForSignal(yield, ifc.dependentReqRemoved);

                DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
                        this, orderId);

                // This is to avoid checking *it!=this after doWaitForSignal()
                // since it could have been deleted.
                found_hazard = true;
                break;
            }
        }
    } while (found_hazard);
}

void
SMMUTranslationProcess::hazardIdRelease()
{
    auto orderId = AMBA::orderId(request.pkt);

    DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);

    std::list<SMMUTranslationProcess *> &depReqs =
        request.isWrite ?
            ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
    std::list<SMMUTranslationProcess *>::iterator it;

    for (it = depReqs.begin(); it != depReqs.end(); ++it) {
        if (*it == this)
            break;
    }

    if (it == depReqs.end())
        panic("hazardIdRelease: request not found");

    depReqs.erase(it);

    doBroadcastSignal(ifc.dependentReqRemoved);
}

void
SMMUTranslationProcess::issuePrefetch(Addr addr)
{
    if (!smmu.system.isTimingMode())
        return;

    if (!ifc.prefetchEnable || ifc.xlateSlotsRemaining == 0)
        return;

    std::string proc_name = csprintf("%sprf", name());
    SMMUTranslationProcess *proc =
        new SMMUTranslationProcess(proc_name, smmu, ifc);

    proc->beginTransaction(
        SMMUTranslRequest::prefetch(addr, request.sid, request.ssid));
    proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
}
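// Master-port beat accounting in completeTransaction() below mirrors
// the slave side in main(): e.g. with a 16-byte master port
// (masterPortWidth == 16, illustrative value), a 64-byte write costs
// (64 + 15) / 16 = 4 beats. Completed non-ATS writes also hand back
// the ceil(size / portWidth) write-buffer slots they occupied on the
// slave interface before retries are scheduled.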
void
SMMUTranslationProcess::completeTransaction(Yield &yield,
                                            const TranslResult &tr)
{
    assert(tr.fault == FAULT_NONE);

    unsigned numMasterBeats = request.isWrite ?
        (request.size + (smmu.masterPortWidth-1))
            / smmu.masterPortWidth :
        1;

    doSemaphoreDown(yield, smmu.masterPortSem);
    doDelay(yield, Cycles(numMasterBeats));
    doSemaphoreUp(smmu.masterPortSem);


    smmu.translationTimeDist.sample(curTick() - recvTick);
    if (!request.isAtsRequest && request.isWrite)
        ifc.wrBufSlotsRemaining +=
            (request.size + (ifc.portWidth-1)) / ifc.portWidth;

    smmu.scheduleSlaveRetries();


    SMMUAction a;

    if (request.isAtsRequest) {
        a.type = ACTION_SEND_RESP_ATS;

        if (smmu.system.isAtomicMode()) {
            request.pkt->makeAtomicResponse();
        } else if (smmu.system.isTimingMode()) {
            request.pkt->makeTimingResponse();
        } else {
            panic("Not in atomic or timing mode");
        }
    } else {
        a.type = ACTION_SEND_REQ_FINAL;
        a.ifc = &ifc;
    }

    a.pkt = request.pkt;
    a.delay = 0;

    a.pkt->setAddr(tr.addr);
    a.pkt->req->setPaddr(tr.addr);

    yield(a);

    if (!request.isAtsRequest) {
        PacketPtr pkt = yield.get();
        pkt->setAddr(request.addr);

        a.type = ACTION_SEND_RESP;
        a.pkt = pkt;
        a.ifc = &ifc;
        a.delay = 0;
        yield(a);
    }
}

void
SMMUTranslationProcess::completePrefetch(Yield &yield)
{
    SMMUAction a;
    a.type = ACTION_TERMINATE;
    a.pkt = NULL;
    a.ifc = &ifc;
    a.delay = 0;
    yield(a);
}
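// Event queue indexing in sendEvent() below: the low bits of
// eventq_base encode log2 of the queue size, so mask(...) builds an
// index mask (a 256-entry queue gives sizeMask == 0xff; size
// illustrative). PROD and CONS wrap modulo the queue size, and the
// queue is treated as full when ((PROD + 1) & sizeMask) ==
// (CONS & sizeMask), leaving one slot unused to tell full from empty.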
void
SMMUTranslationProcess::sendEvent(Yield &yield, const SMMUEvent &ev)
{
    int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK) &
            Q_CONS_PROD_MASK;

    if (((smmu.regs.eventq_prod+1) & sizeMask) ==
            (smmu.regs.eventq_cons & sizeMask))
        panic("Event queue full - aborting\n");

    Addr event_addr =
        (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
        (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);

    DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
        "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
        event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
        ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);

    // This deliberately resets the overflow field in eventq_prod!
    smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;

    doWrite(yield, event_addr, &ev, sizeof(ev));

    if (!(smmu.regs.eventq_irq_cfg0 & E_BASE_ENABLE_MASK))
        panic("eventq msi not enabled\n");

    doWrite(yield, smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK,
            &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
}

void
SMMUTranslationProcess::doReadSTE(Yield &yield,
                                  StreamTableEntry &ste,
                                  uint32_t sid)
{
    unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
    if (sid >= max_sid)
        panic("SID %#x out of range, max=%#x", sid, max_sid);

    Addr ste_addr;

    if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
        unsigned split =
            (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK)
                >> ST_CFG_SPLIT_SHIFT;

        if (split != 7 && split != 8 && split != 16)
            panic("Invalid stream table split %d", split);

        uint64_t l2_ptr;
        uint64_t l2_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
            bits(sid, 32, split) * sizeof(l2_ptr);

        DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);

        doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);

        DPRINTF(SMMUv3, "Got L1STE at %#x: 0x%016x\n", l2_addr, l2_ptr);

        unsigned span = l2_ptr & ST_L2_SPAN_MASK;
        if (span == 0)
            panic("Invalid level 1 stream table descriptor");

        unsigned index = bits(sid, split-1, 0);
        if (index >= (1 << span))
            panic("StreamID %d out of level 1 descriptor range %d",
                  sid, 1<<span);

        ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);

        smmu.steL1Fetches++;
    } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) ==
                   ST_CFG_FMT_LINEAR) {
        ste_addr =
            (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
    } else {
        panic("Invalid stream table format");
    }

    DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);

    doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);

    DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
    DPRINTF(SMMUv3, "    STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
    DPRINTF(SMMUv3, "    STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
    DPRINTF(SMMUv3, "    STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
    DPRINTF(SMMUv3, "    STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
    DPRINTF(SMMUv3, "    STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
    DPRINTF(SMMUv3, "    STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
    DPRINTF(SMMUv3, "    STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);

    if (!ste.dw0.valid)
        panic("STE @ %#x not valid\n", ste_addr);

    smmu.steFetches++;
}
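// Two-level CD table indexing in doReadCD() below: "split" low SSID
// bits index a leaf table and the remaining high bits index the
// level-1 pointer array, mirroring the SID split used for two-level
// stream tables above. E.g. with the 4 KB leaf format (split = 7) and
// ssid = 0x1a5 (illustrative value), the L1 index is 0x1a5 >> 7 = 3
// and the leaf index is 0x1a5 & 0x7f = 0x25.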
void
SMMUTranslationProcess::doReadCD(Yield &yield,
                                 ContextDescriptor &cd,
                                 const StreamTableEntry &ste,
                                 uint32_t sid, uint32_t ssid)
{
    Addr cd_addr;

    if (ste.dw0.s1cdmax == 0) {
        cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
    } else {
        unsigned max_ssid = 1 << ste.dw0.s1cdmax;
        if (ssid >= max_ssid)
            panic("SSID %#x out of range, max=%#x", ssid, max_ssid);

        if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
            ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
        {
            unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;

            uint64_t l2_ptr;
            uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
                bits(ssid, 24, split) * sizeof(l2_ptr);

            if (context.stage2Enable)
                l2_addr = translateStage2(yield, l2_addr, false).addr;

            DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);

            doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);

            DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);

            cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);

            smmu.cdL1Fetches++;
        } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
            cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
        } else {
            // Guard against leaving cd_addr uninitialized on an
            // unrecognized stage 1 format.
            panic("Unsupported STE s1fmt %d\n", ste.dw0.s1fmt);
        }
    }

    if (context.stage2Enable)
        cd_addr = translateStage2(yield, cd_addr, false).addr;

    DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);

    doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);

    DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
    DPRINTF(SMMUv3, "    CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
    DPRINTF(SMMUv3, "    CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
    DPRINTF(SMMUv3, "    CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
    DPRINTF(SMMUv3, "    CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
    DPRINTF(SMMUv3, "    CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
    DPRINTF(SMMUv3, "    CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
    DPRINTF(SMMUv3, "    CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);


    if (!cd.dw0.valid)
        panic("CD @ %#x not valid\n", cd_addr);

    smmu.cdFetches++;
}

void
SMMUTranslationProcess::doReadConfig(Yield &yield, Addr addr,
                                     void *ptr, size_t size,
                                     uint32_t sid, uint32_t ssid)
{
    doRead(yield, addr, ptr, size);
}

void
SMMUTranslationProcess::doReadPTE(Yield &yield, Addr va, Addr addr,
                                  void *ptr, unsigned stage,
                                  unsigned level)
{
    size_t pte_size = sizeof(PageTableOps::pte_t);

    Addr mask = pte_size - 1;
    Addr base = addr & ~mask;

    doRead(yield, base, ptr, pte_size);
}