// smmu_v3.cc revision 14132:d6093eeca3af
1/* 2 * Copyright (c) 2013, 2018-2019 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions are 16 * met: redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer; 18 * redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution; 21 * neither the name of the copyright holders nor the names of its 22 * contributors may be used to endorse or promote products derived from 23 * this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 28 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 * 37 * Authors: Stan Czerniawski 38 */ 39 40#include "dev/arm/smmu_v3.hh" 41 42#include <cstddef> 43#include <cstring> 44 45#include "base/bitfield.hh" 46#include "base/cast.hh" 47#include "base/logging.hh" 48#include "base/trace.hh" 49#include "base/types.hh" 50#include "debug/Checkpoint.hh" 51#include "debug/SMMUv3.hh" 52#include "dev/arm/smmu_v3_transl.hh" 53#include "mem/packet_access.hh" 54#include "sim/system.hh" 55 56SMMUv3::SMMUv3(SMMUv3Params *params) : 57 MemObject(params), 58 system(*params->system), 59 masterId(params->system->getMasterId(this)), 60 masterPort(name() + ".master", *this), 61 masterTableWalkPort(name() + ".master_walker", *this), 62 controlPort(name() + ".control", *this, params->reg_map), 63 tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy), 64 configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy), 65 ipaCache(params->ipa_entries, params->ipa_assoc, params->ipa_policy), 66 walkCache({ { params->walk_S1L0, params->walk_S1L1, 67 params->walk_S1L2, params->walk_S1L3, 68 params->walk_S2L0, params->walk_S2L1, 69 params->walk_S2L2, params->walk_S2L3 } }, 70 params->walk_assoc, params->walk_policy), 71 tlbEnable(params->tlb_enable), 72 configCacheEnable(params->cfg_enable), 73 ipaCacheEnable(params->ipa_enable), 74 walkCacheEnable(params->walk_enable), 75 tableWalkPortEnable(false), 76 walkCacheNonfinalEnable(params->wc_nonfinal_enable), 77 
walkCacheS1Levels(params->wc_s1_levels), 78 walkCacheS2Levels(params->wc_s2_levels), 79 masterPortWidth(params->master_port_width), 80 tlbSem(params->tlb_slots), 81 ifcSmmuSem(1), 82 smmuIfcSem(1), 83 configSem(params->cfg_slots), 84 ipaSem(params->ipa_slots), 85 walkSem(params->walk_slots), 86 masterPortSem(1), 87 transSem(params->xlate_slots), 88 ptwSem(params->ptw_slots), 89 cycleSem(1), 90 tlbLat(params->tlb_lat), 91 ifcSmmuLat(params->ifc_smmu_lat), 92 smmuIfcLat(params->smmu_ifc_lat), 93 configLat(params->cfg_lat), 94 ipaLat(params->ipa_lat), 95 walkLat(params->walk_lat), 96 slaveInterfaces(params->slave_interfaces), 97 commandExecutor(name() + ".cmd_exec", *this), 98 regsMap(params->reg_map), 99 processCommandsEvent(this) 100{ 101 fatal_if(regsMap.size() != SMMU_REG_SIZE, 102 "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n", 103 regsMap.size(), SMMU_REG_SIZE); 104 105 // Init smmu registers to 0 106 memset(®s, 0, sizeof(regs)); 107 108 // Setup RO ID registers 109 regs.idr0 = params->smmu_idr0; 110 regs.idr1 = params->smmu_idr1; 111 regs.idr2 = params->smmu_idr2; 112 regs.idr3 = params->smmu_idr3; 113 regs.idr4 = params->smmu_idr4; 114 regs.idr5 = params->smmu_idr5; 115 regs.iidr = params->smmu_iidr; 116 regs.aidr = params->smmu_aidr; 117 118 // TODO: At the moment it possible to set the ID registers to hold 119 // any possible value. It would be nice to have a sanity check here 120 // at construction time in case some idx registers are programmed to 121 // store an unallowed values or if the are configuration conflicts. 
122 warn("SMMUv3 IDx register values unchecked\n"); 123 124 for (auto ifc : slaveInterfaces) 125 ifc->setSMMU(this); 126} 127 128bool 129SMMUv3::masterRecvTimingResp(PacketPtr pkt) 130{ 131 DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n", 132 pkt->getAddr(), pkt->getSize()); 133 134 // @todo: We need to pay for this and not just zero it out 135 pkt->headerDelay = pkt->payloadDelay = 0; 136 137 SMMUProcess *proc = 138 safe_cast<SMMUProcess *>(pkt->popSenderState()); 139 140 runProcessTiming(proc, pkt); 141 142 return true; 143} 144 145void 146SMMUv3::masterRecvReqRetry() 147{ 148 assert(!packetsToRetry.empty()); 149 150 while (!packetsToRetry.empty()) { 151 SMMUAction a = packetsToRetry.front(); 152 153 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL); 154 155 DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n", 156 a.pkt->getAddr(), a.pkt->getSize()); 157 158 if (!masterPort.sendTimingReq(a.pkt)) 159 break; 160 161 packetsToRetry.pop(); 162 163 /* 164 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet 165 * on the master interface; this means that we no longer hold on to 166 * that transaction and therefore can accept a new one. 167 * If the slave port was stalled then unstall it (send retry). 
168 */ 169 if (a.type == ACTION_SEND_REQ_FINAL) 170 scheduleSlaveRetries(); 171 } 172} 173 174bool 175SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt) 176{ 177 DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n", 178 pkt->getAddr(), pkt->getSize()); 179 180 // @todo: We need to pay for this and not just zero it out 181 pkt->headerDelay = pkt->payloadDelay = 0; 182 183 SMMUProcess *proc = 184 safe_cast<SMMUProcess *>(pkt->popSenderState()); 185 186 runProcessTiming(proc, pkt); 187 188 return true; 189} 190 191void 192SMMUv3::masterTableWalkRecvReqRetry() 193{ 194 assert(tableWalkPortEnable); 195 assert(!packetsTableWalkToRetry.empty()); 196 197 while (!packetsTableWalkToRetry.empty()) { 198 SMMUAction a = packetsTableWalkToRetry.front(); 199 200 assert(a.type==ACTION_SEND_REQ); 201 202 DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n", 203 a.pkt->getAddr(), a.pkt->getSize()); 204 205 if (!masterTableWalkPort.sendTimingReq(a.pkt)) 206 break; 207 208 packetsTableWalkToRetry.pop(); 209 } 210} 211 212void 213SMMUv3::scheduleSlaveRetries() 214{ 215 for (auto ifc : slaveInterfaces) { 216 ifc->scheduleDeviceRetry(); 217 } 218} 219 220SMMUAction 221SMMUv3::runProcess(SMMUProcess *proc, PacketPtr pkt) 222{ 223 if (system.isAtomicMode()) { 224 return runProcessAtomic(proc, pkt); 225 } else if (system.isTimingMode()) { 226 return runProcessTiming(proc, pkt); 227 } else { 228 panic("Not in timing or atomic mode!"); 229 } 230} 231 232SMMUAction 233SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt) 234{ 235 SMMUAction action; 236 Tick delay = 0; 237 bool finished = false; 238 239 do { 240 action = proc->run(pkt); 241 242 switch (action.type) { 243 case ACTION_SEND_REQ: 244 // Send an MMU initiated request on the table walk port if it is 245 // enabled. Otherwise, fall through and handle same as the final 246 // ACTION_SEND_REQ_FINAL request. 
247 if (tableWalkPortEnable) { 248 delay += masterTableWalkPort.sendAtomic(action.pkt); 249 pkt = action.pkt; 250 break; 251 } 252 M5_FALLTHROUGH; 253 case ACTION_SEND_REQ_FINAL: 254 delay += masterPort.sendAtomic(action.pkt); 255 pkt = action.pkt; 256 break; 257 258 case ACTION_SEND_RESP: 259 case ACTION_SEND_RESP_ATS: 260 case ACTION_SLEEP: 261 finished = true; 262 break; 263 264 case ACTION_DELAY: 265 delay += action.delay; 266 break; 267 268 case ACTION_TERMINATE: 269 panic("ACTION_TERMINATE in atomic mode\n"); 270 271 default: 272 panic("Unknown action\n"); 273 } 274 } while (!finished); 275 276 action.delay = delay; 277 278 return action; 279} 280 281SMMUAction 282SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt) 283{ 284 SMMUAction action = proc->run(pkt); 285 286 switch (action.type) { 287 case ACTION_SEND_REQ: 288 // Send an MMU initiated request on the table walk port if it is 289 // enabled. Otherwise, fall through and handle same as the final 290 // ACTION_SEND_REQ_FINAL request. 
291 if (tableWalkPortEnable) { 292 action.pkt->pushSenderState(proc); 293 294 DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n", 295 action.pkt->getAddr(), action.pkt->getSize()); 296 297 if (packetsTableWalkToRetry.empty() 298 && masterTableWalkPort.sendTimingReq(action.pkt)) { 299 scheduleSlaveRetries(); 300 } else { 301 DPRINTF(SMMUv3, "[t] master HWTW req needs retry," 302 " qlen=%d\n", packetsTableWalkToRetry.size()); 303 packetsTableWalkToRetry.push(action); 304 } 305 306 break; 307 } 308 M5_FALLTHROUGH; 309 case ACTION_SEND_REQ_FINAL: 310 action.pkt->pushSenderState(proc); 311 312 DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n", 313 action.pkt->getAddr(), action.pkt->getSize()); 314 315 if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) { 316 scheduleSlaveRetries(); 317 } else { 318 DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n", 319 packetsToRetry.size()); 320 packetsToRetry.push(action); 321 } 322 323 break; 324 325 case ACTION_SEND_RESP: 326 // @todo: We need to pay for this and not just zero it out 327 action.pkt->headerDelay = action.pkt->payloadDelay = 0; 328 329 DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n", 330 action.pkt->getAddr(), 331 action.pkt->getSize()); 332 333 assert(action.ifc); 334 action.ifc->schedTimingResp(action.pkt); 335 336 delete proc; 337 break; 338 339 case ACTION_SEND_RESP_ATS: 340 // @todo: We need to pay for this and not just zero it out 341 action.pkt->headerDelay = action.pkt->payloadDelay = 0; 342 343 DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n", 344 action.pkt->getAddr(), action.pkt->getSize()); 345 346 assert(action.ifc); 347 action.ifc->schedAtsTimingResp(action.pkt); 348 349 delete proc; 350 break; 351 352 case ACTION_DELAY: 353 case ACTION_SLEEP: 354 break; 355 356 case ACTION_TERMINATE: 357 delete proc; 358 break; 359 360 default: 361 panic("Unknown action\n"); 362 } 363 364 return action; 365} 366 367void 368SMMUv3::processCommands() 369{ 370 DPRINTF(SMMUv3, 
"processCommands()\n"); 371 372 if (system.isAtomicMode()) { 373 SMMUAction a = runProcessAtomic(&commandExecutor, NULL); 374 (void) a; 375 } else if (system.isTimingMode()) { 376 if (!commandExecutor.isBusy()) 377 runProcessTiming(&commandExecutor, NULL); 378 } else { 379 panic("Not in timing or atomic mode!"); 380 } 381} 382 383void 384SMMUv3::processCommand(const SMMUCommand &cmd) 385{ 386 switch (cmd.dw0.type) { 387 case CMD_PRF_CONFIG: 388 DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n"); 389 break; 390 391 case CMD_PRF_ADDR: 392 DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n"); 393 break; 394 395 case CMD_CFGI_STE: { 396 DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid); 397 configCache.invalidateSID(cmd.dw0.sid); 398 399 for (auto slave_interface : slaveInterfaces) { 400 slave_interface->microTLB->invalidateSID(cmd.dw0.sid); 401 slave_interface->mainTLB->invalidateSID(cmd.dw0.sid); 402 } 403 break; 404 } 405 406 case CMD_CFGI_STE_RANGE: { 407 const auto range = cmd.dw1.range; 408 if (range == 31) { 409 // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with 410 // range = 31 411 DPRINTF(SMMUv3, "CMD_CFGI_ALL\n"); 412 configCache.invalidateAll(); 413 414 for (auto slave_interface : slaveInterfaces) { 415 slave_interface->microTLB->invalidateAll(); 416 slave_interface->mainTLB->invalidateAll(); 417 } 418 } else { 419 DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n"); 420 const auto start_sid = cmd.dw0.sid & ~((1 << (range + 1)) - 1); 421 const auto end_sid = start_sid + (1 << (range + 1)) - 1; 422 for (auto sid = start_sid; sid <= end_sid; sid++) { 423 configCache.invalidateSID(sid); 424 425 for (auto slave_interface : slaveInterfaces) { 426 slave_interface->microTLB->invalidateSID(sid); 427 slave_interface->mainTLB->invalidateSID(sid); 428 } 429 } 430 } 431 break; 432 } 433 434 case CMD_CFGI_CD: { 435 DPRINTF(SMMUv3, "CMD_CFGI_CD sid=%#x ssid=%#x\n", 436 cmd.dw0.sid, cmd.dw0.ssid); 437 configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid); 438 439 for (auto 
slave_interface : slaveInterfaces) { 440 slave_interface->microTLB->invalidateSSID( 441 cmd.dw0.sid, cmd.dw0.ssid); 442 slave_interface->mainTLB->invalidateSSID( 443 cmd.dw0.sid, cmd.dw0.ssid); 444 } 445 break; 446 } 447 448 case CMD_CFGI_CD_ALL: { 449 DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid); 450 configCache.invalidateSID(cmd.dw0.sid); 451 452 for (auto slave_interface : slaveInterfaces) { 453 slave_interface->microTLB->invalidateSID(cmd.dw0.sid); 454 slave_interface->mainTLB->invalidateSID(cmd.dw0.sid); 455 } 456 break; 457 } 458 459 case CMD_TLBI_NH_ALL: { 460 DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid); 461 for (auto slave_interface : slaveInterfaces) { 462 slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid); 463 slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); 464 } 465 tlb.invalidateVMID(cmd.dw0.vmid); 466 walkCache.invalidateVMID(cmd.dw0.vmid); 467 break; 468 } 469 470 case CMD_TLBI_NH_ASID: { 471 DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n", 472 cmd.dw0.asid, cmd.dw0.vmid); 473 for (auto slave_interface : slaveInterfaces) { 474 slave_interface->microTLB->invalidateASID( 475 cmd.dw0.asid, cmd.dw0.vmid); 476 slave_interface->mainTLB->invalidateASID( 477 cmd.dw0.asid, cmd.dw0.vmid); 478 } 479 tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid); 480 walkCache.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid); 481 break; 482 } 483 484 case CMD_TLBI_NH_VAA: { 485 const Addr addr = cmd.addr(); 486 DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n", 487 addr, cmd.dw0.vmid); 488 for (auto slave_interface : slaveInterfaces) { 489 slave_interface->microTLB->invalidateVAA( 490 addr, cmd.dw0.vmid); 491 slave_interface->mainTLB->invalidateVAA( 492 addr, cmd.dw0.vmid); 493 } 494 tlb.invalidateVAA(addr, cmd.dw0.vmid); 495 496 if (!cmd.dw1.leaf) 497 walkCache.invalidateVAA(addr, cmd.dw0.vmid); 498 break; 499 } 500 501 case CMD_TLBI_NH_VA: { 502 const Addr addr = cmd.addr(); 503 DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x 
vmid=%#x\n", 504 addr, cmd.dw0.asid, cmd.dw0.vmid); 505 for (auto slave_interface : slaveInterfaces) { 506 slave_interface->microTLB->invalidateVA( 507 addr, cmd.dw0.asid, cmd.dw0.vmid); 508 slave_interface->mainTLB->invalidateVA( 509 addr, cmd.dw0.asid, cmd.dw0.vmid); 510 } 511 tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid); 512 513 if (!cmd.dw1.leaf) 514 walkCache.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid); 515 break; 516 } 517 518 case CMD_TLBI_S2_IPA: { 519 const Addr addr = cmd.addr(); 520 DPRINTF(SMMUv3, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n", 521 addr, cmd.dw0.vmid); 522 // This does not invalidate TLBs containing 523 // combined Stage1 + Stage2 translations, as per the spec. 524 ipaCache.invalidateIPA(addr, cmd.dw0.vmid); 525 526 if (!cmd.dw1.leaf) 527 walkCache.invalidateVMID(cmd.dw0.vmid); 528 break; 529 } 530 531 case CMD_TLBI_S12_VMALL: { 532 DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid); 533 for (auto slave_interface : slaveInterfaces) { 534 slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid); 535 slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); 536 } 537 tlb.invalidateVMID(cmd.dw0.vmid); 538 ipaCache.invalidateVMID(cmd.dw0.vmid); 539 walkCache.invalidateVMID(cmd.dw0.vmid); 540 break; 541 } 542 543 case CMD_TLBI_NSNH_ALL: { 544 DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n"); 545 for (auto slave_interface : slaveInterfaces) { 546 slave_interface->microTLB->invalidateAll(); 547 slave_interface->mainTLB->invalidateAll(); 548 } 549 tlb.invalidateAll(); 550 ipaCache.invalidateAll(); 551 walkCache.invalidateAll(); 552 break; 553 } 554 555 case CMD_RESUME: 556 DPRINTF(SMMUv3, "CMD_RESUME\n"); 557 panic("resume unimplemented"); 558 break; 559 560 default: 561 warn("Unimplemented command %#x\n", cmd.dw0.type); 562 break; 563 } 564} 565 566const PageTableOps* 567SMMUv3::getPageTableOps(uint8_t trans_granule) 568{ 569 static V8PageTableOps4k ptOps4k; 570 static V8PageTableOps16k ptOps16k; 571 static V8PageTableOps64k ptOps64k; 572 573 
switch (trans_granule) { 574 case TRANS_GRANULE_4K: return &ptOps4k; 575 case TRANS_GRANULE_16K: return &ptOps16k; 576 case TRANS_GRANULE_64K: return &ptOps64k; 577 default: 578 panic("Unknown translation granule size %d", trans_granule); 579 } 580} 581 582Tick 583SMMUv3::readControl(PacketPtr pkt) 584{ 585 DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n", 586 pkt->getAddr(), pkt->getSize()); 587 588 int offset = pkt->getAddr() - regsMap.start(); 589 assert(offset >= 0 && offset < SMMU_REG_SIZE); 590 591 if (inSecureBlock(offset)) { 592 warn("smmu: secure registers (0x%x) are not implemented\n", 593 offset); 594 } 595 596 auto reg_ptr = regs.data + offset; 597 598 switch (pkt->getSize()) { 599 case sizeof(uint32_t): 600 pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr)); 601 break; 602 case sizeof(uint64_t): 603 pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr)); 604 break; 605 default: 606 panic("smmu: unallowed access size: %d bytes\n", pkt->getSize()); 607 break; 608 } 609 610 pkt->makeAtomicResponse(); 611 612 return 0; 613} 614 615Tick 616SMMUv3::writeControl(PacketPtr pkt) 617{ 618 int offset = pkt->getAddr() - regsMap.start(); 619 assert(offset >= 0 && offset < SMMU_REG_SIZE); 620 621 DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n", 622 pkt->getAddr(), pkt->getSize(), 623 pkt->getSize() == sizeof(uint64_t) ? 
624 pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>()); 625 626 switch (offset) { 627 case offsetof(SMMURegs, cr0): 628 assert(pkt->getSize() == sizeof(uint32_t)); 629 regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>(); 630 break; 631 632 case offsetof(SMMURegs, cr1): 633 case offsetof(SMMURegs, cr2): 634 case offsetof(SMMURegs, strtab_base_cfg): 635 case offsetof(SMMURegs, eventq_cons): 636 case offsetof(SMMURegs, eventq_irq_cfg1): 637 case offsetof(SMMURegs, priq_cons): 638 assert(pkt->getSize() == sizeof(uint32_t)); 639 *reinterpret_cast<uint32_t *>(regs.data + offset) = 640 pkt->getLE<uint32_t>(); 641 break; 642 643 case offsetof(SMMURegs, cmdq_cons): 644 assert(pkt->getSize() == sizeof(uint32_t)); 645 if (regs.cr0 & CR0_CMDQEN_MASK) { 646 warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n"); 647 } else { 648 *reinterpret_cast<uint32_t *>(regs.data + offset) = 649 pkt->getLE<uint32_t>(); 650 } 651 break; 652 653 case offsetof(SMMURegs, cmdq_prod): 654 assert(pkt->getSize() == sizeof(uint32_t)); 655 *reinterpret_cast<uint32_t *>(regs.data + offset) = 656 pkt->getLE<uint32_t>(); 657 schedule(processCommandsEvent, nextCycle()); 658 break; 659 660 case offsetof(SMMURegs, strtab_base): 661 case offsetof(SMMURegs, eventq_irq_cfg0): 662 assert(pkt->getSize() == sizeof(uint64_t)); 663 *reinterpret_cast<uint64_t *>(regs.data + offset) = 664 pkt->getLE<uint64_t>(); 665 break; 666 667 case offsetof(SMMURegs, cmdq_base): 668 assert(pkt->getSize() == sizeof(uint64_t)); 669 if (regs.cr0 & CR0_CMDQEN_MASK) { 670 warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n"); 671 } else { 672 *reinterpret_cast<uint64_t *>(regs.data + offset) = 673 pkt->getLE<uint64_t>(); 674 regs.cmdq_cons = 0; 675 regs.cmdq_prod = 0; 676 } 677 break; 678 679 case offsetof(SMMURegs, eventq_base): 680 assert(pkt->getSize() == sizeof(uint64_t)); 681 *reinterpret_cast<uint64_t *>(regs.data + offset) = 682 pkt->getLE<uint64_t>(); 683 regs.eventq_cons = 0; 684 regs.eventq_prod = 0; 685 break; 686 687 case 
offsetof(SMMURegs, priq_base): 688 assert(pkt->getSize() == sizeof(uint64_t)); 689 *reinterpret_cast<uint64_t *>(regs.data + offset) = 690 pkt->getLE<uint64_t>(); 691 regs.priq_cons = 0; 692 regs.priq_prod = 0; 693 break; 694 695 default: 696 if (inSecureBlock(offset)) { 697 warn("smmu: secure registers (0x%x) are not implemented\n", 698 offset); 699 } else { 700 warn("smmu: write to read-only/undefined register at 0x%x\n", 701 offset); 702 } 703 } 704 705 pkt->makeAtomicResponse(); 706 707 return 0; 708} 709 710bool 711SMMUv3::inSecureBlock(uint32_t offs) const 712{ 713 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ) 714 return true; 715 else 716 return false; 717} 718 719void 720SMMUv3::init() 721{ 722 // make sure both sides are connected and have the same block size 723 if (!masterPort.isConnected()) 724 fatal("Master port is not connected.\n"); 725 726 // If the second master port is connected for the table walks, enable 727 // the mode to send table walks through this port instead 728 if (masterTableWalkPort.isConnected()) 729 tableWalkPortEnable = true; 730 731 // notify the master side of our address ranges 732 for (auto ifc : slaveInterfaces) { 733 ifc->sendRange(); 734 } 735 736 if (controlPort.isConnected()) 737 controlPort.sendRangeChange(); 738} 739 740void 741SMMUv3::regStats() 742{ 743 MemObject::regStats(); 744 745 using namespace Stats; 746 747 for (size_t i = 0; i < slaveInterfaces.size(); i++) { 748 slaveInterfaces[i]->microTLB->regStats( 749 csprintf("%s.utlb%d", name(), i)); 750 slaveInterfaces[i]->mainTLB->regStats( 751 csprintf("%s.maintlb%d", name(), i)); 752 } 753 754 tlb.regStats(name() + ".tlb"); 755 configCache.regStats(name() + ".cfg"); 756 ipaCache.regStats(name() + ".ipa"); 757 walkCache.regStats(name() + ".walk"); 758 759 steL1Fetches 760 .name(name() + ".steL1Fetches") 761 .desc("STE L1 fetches") 762 .flags(pdf); 763 764 steFetches 765 .name(name() + ".steFetches") 766 .desc("STE fetches") 767 .flags(pdf); 768 
769 cdL1Fetches 770 .name(name() + ".cdL1Fetches") 771 .desc("CD L1 fetches") 772 .flags(pdf); 773 774 cdFetches 775 .name(name() + ".cdFetches") 776 .desc("CD fetches") 777 .flags(pdf); 778 779 translationTimeDist 780 .init(0, 2000000, 2000) 781 .name(name() + ".translationTimeDist") 782 .desc("Time to translate address") 783 .flags(pdf); 784 785 ptwTimeDist 786 .init(0, 2000000, 2000) 787 .name(name() + ".ptwTimeDist") 788 .desc("Time to walk page tables") 789 .flags(pdf); 790} 791 792DrainState 793SMMUv3::drain() 794{ 795 // Wait until the Command Executor is not busy 796 if (commandExecutor.isBusy()) { 797 return DrainState::Draining; 798 } 799 return DrainState::Drained; 800} 801 802void 803SMMUv3::serialize(CheckpointOut &cp) const 804{ 805 DPRINTF(Checkpoint, "Serializing SMMUv3\n"); 806 807 SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0])); 808} 809 810void 811SMMUv3::unserialize(CheckpointIn &cp) 812{ 813 DPRINTF(Checkpoint, "Unserializing SMMUv3\n"); 814 815 UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0])); 816} 817 818Port& 819SMMUv3::getPort(const std::string &name, PortID id) 820{ 821 if (name == "master") { 822 return masterPort; 823 } else if (name == "master_walker") { 824 return masterTableWalkPort; 825 } else if (name == "control") { 826 return controlPort; 827 } else { 828 return MemObject::getPort(name, id); 829 } 830} 831 832SMMUv3* 833SMMUv3Params::create() 834{ 835 return new SMMUv3(this); 836} 837