tlb.cc revision 11628:85011e8eaad9
/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#include <cstring>
#include <memory>

#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/pagetable.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/tlb.hh"
#include "arch/x86/x86_traits.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/packet_access.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

namespace X86ISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), configAddress(0), size(p->size),
      tlb(size), lruSeq(0)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p->walker;
    walker->setTLB(this);
}

void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.
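    // Entries are stamped with a fresh sequence number from nextSeq() on
    // every insertion and every lookup hit, so the entry with the smallest
    // lruSeq is the least recently used one. The linear scan below is
    // O(size), which is cheap for the small, fixed-size TLBs modeled here.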
    unsigned lru = 0;
    for (unsigned i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    assert(tlb[lru].trieHandle);
    trie.remove(tlb[lru].trieHandle);
    tlb[lru].trieHandle = NULL;
    freeList.push_back(&tlb[lru]);
}

TlbEntry *
TLB::insert(Addr vpn, TlbEntry &entry)
{
    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = trie.lookup(vpn);
    if (newEntry) {
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    // The trie key width shrinks with the page size (logBytes), so a
    // single entry can cover an entire large page.
    newEntry->trieHandle =
        trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry && update_lru)
        entry->lruSeq = nextSeq();
    return entry;
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

void
TLB::flushNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non-global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::demapPage(Addr va, uint64_t asn)
{
    // The address space number is unused here; these entries are not
    // ASN-tagged.
    TlbEntry *entry = trie.lookup(va);
    if (entry) {
        trie.remove(entry->trieHandle);
        entry->trieHandle = NULL;
        freeList.push_back(entry);
    }
}

Fault
TLB::translateInt(RequestPtr req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return std::make_shared<GeneralProtection>(0);

        // The index is multiplied by the size of a MiscReg so that
        // any memory dependence calculations will not see these as
        // overlapping.
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16 bit IO address
        // space.
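        // x86 I/O port space is only 16 bits wide, so a port number above
        // 0xFFFF means the internal address was malformed. Ports 0xCF8 and
        // 0xCFC, special-cased below, implement PCI configuration space
        // access mechanism #1: an address register at 0xCF8 and a 4-byte
        // data window at 0xCFC-0xCFF.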
        assert(!(IOPort & ~0xFFFF));
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    Addr paddr = req->getPaddr();

    AddrRange m5opRange(0xFFFF0000, 0xFFFFFFFF);

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR |
                      Request::STRICT_ORDER);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    } else if (FullSystem) {
        // Check for an access to the local APIC
        LocalApicBase localApicBase =
            tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
        AddrRange apicRange(localApicBase.base * PageBytes,
                            (localApicBase.base + 1) * PageBytes - 1);

        if (apicRange.contains(paddr)) {
            // The Intel developer's manuals say the below restrictions
            // apply, but the Linux kernel, because of a compiler
            // optimization, breaks them.
            /*
            // Check alignment
            if (paddr & ((32/8) - 1))
                return new GeneralProtection(0);
            // Check access size
            if (req->getSize() != (32/8))
                return new GeneralProtection(0);
            */
            // Force the access to be uncacheable.
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                                              paddr - apicRange.start()));
        }
    }

    return NoFault;
}

Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    Request::Flags flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
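            // The descriptor-table pseudo-segments (TSG, IDTR) and the
            // HS/LS segments used internally by gem5 microcode never hold
            // an architectural selector, so they are exempt from this
            // check.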
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                        seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                    && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return std::make_shared<GeneralProtection>(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return std::make_shared<GeneralProtection>(0);
                if (!attr.readable && mode == Read)
                    return std::make_shared<GeneralProtection>(0);
                expandDown = attr.expandDown;
            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            unsigned logSize = sizeOverride ? (unsigned)m5Reg.altAddr
                                            : (unsigned)m5Reg.defAddr;
            int size = (1 << logSize) * 8;
            Addr offset = bits(vaddr - base, size - 1, 0);
            Addr endOffset = offset + req->getSize() - 1;
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                if (offset <= limit || endOffset <= limit)
                    return std::make_shared<GeneralProtection>(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return std::make_shared<GeneralProtection>(0);
            }
        }
        if (m5Reg.submode != SixtyFourBitMode ||
                (flags & (AddrSizeFlagBit << FlagShift)))
            vaddr &= mask(32);
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (!entry) {
                if (FullSystem) {
                    Fault fault = walker->start(tc, translation, req, mode);
                    if (timing || fault != NoFault) {
                        // This gets ignored in atomic mode.
                        delayedResponse = true;
                        return fault;
                    }
                    entry = lookup(vaddr);
                    assert(entry);
                } else {
                    DPRINTF(TLB, "Handling a TLB miss for "
                            "address %#x at pc %#x.\n",
                            vaddr, tc->instAddr());

                    Process *p = tc->getProcessPtr();
                    TlbEntry newEntry;
                    bool success = p->pTable->lookup(vaddr, newEntry);
                    if (!success && mode != Execute) {
                        // Check if we just need to grow the stack.
                        if (p->fixupStackFault(vaddr)) {
                            // If we did, lookup the entry for the new page.
                            success = p->pTable->lookup(vaddr, newEntry);
                        }
                    }
                    if (!success) {
                        return std::make_shared<PageFault>(vaddr, true, mode,
                                                           true, false);
                    } else {
                        Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                        DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                                newEntry.pageStart());
                        entry = insert(alignedVaddr, newEntry);
                    }
                    DPRINTF(TLB, "Miss was serviced.\n");
                }
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
            bool inUser = (m5Reg.cpl == 3 &&
                    !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
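                // PageFault's arguments are (vaddr, present, mode, user,
                // reserved): present is true because a valid translation
                // exists and this is purely a protection violation, and
                // the reserved-bit flag is false per the comment above.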
                return std::make_shared<PageFault>(vaddr, true, mode, inUser,
                                                   false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return std::make_shared<PageFault>(vaddr, true, Write, inUser,
                                                   false);
            }

            Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Walker *
TLB::getWalker()
{
    return walker;
}

void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    // Do not allow restoring with a smaller TLB than the checkpoint's.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

BaseMasterPort *
TLB::getMasterPort()
{
    return &walker->getMasterPort("port");
}

} // namespace X86ISA

X86ISA::TLB *
X86TLBParams::create()
{
    return new X86ISA::TLB(this);
}
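// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): how a CPU model
// might drive this TLB in atomic mode, assuming `tlb`, `req`, and `tc` have
// already been set up by the caller.
//
//     Fault fault = tlb->translateAtomic(req, tc, BaseTLB::Read);
//     if (fault == NoFault) {
//         // req->getPaddr() now holds the physical address, with any
//         // uncacheable/strict-order flags applied by the TLB.
//     } else {
//         fault->invoke(tc); // e.g. deliver a PageFault to the context
//     }
//
// In timing mode, translateTiming() either calls translation->finish()
// immediately or, on a full-system TLB miss, lets the page-table walker
// deliver the result later (see delayedResponse above).
// ---------------------------------------------------------------------------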