/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */
#include "arch/x86/tlb.hh"

#include <cstring>
#include <memory>

#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/x86_traits.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/packet_access.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

namespace X86ISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), configAddress(0), size(p->size),
      tlb(size), lruSeq(0)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p->walker;
    walker->setTLB(this);
}

void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    unsigned lru = 0;
    for (unsigned i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    assert(tlb[lru].trieHandle);
    trie.remove(tlb[lru].trieHandle);
    tlb[lru].trieHandle = NULL;
    freeList.push_back(&tlb[lru]);
}

TlbEntry *
TLB::insert(Addr vpn, TlbEntry &entry)
{
    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = trie.lookup(vpn);
    if (newEntry) {
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
        trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry && update_lru)
        entry->lruSeq = nextSeq();
    return entry;
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

void
TLB::flushNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non-global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::demapPage(Addr va, uint64_t asn)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry) {
        trie.remove(entry->trieHandle);
        entry->trieHandle = NULL;
        freeList.push_back(entry);
    }
}

Fault
TLB::translateInt(RequestPtr req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return std::make_shared<GeneralProtection>(0);

        // The index is multiplied by the size of a MiscReg so that
        // any memory dependence calculations will not see these as
        // overlapping.
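        // For instance, assuming MiscReg is a uint64_t, the registers at
        // indices N and N + 1 map to the disjoint byte ranges [8N, 8N + 8)
        // and [8N + 8, 8N + 16), so accesses to distinct MSRs never alias.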
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16 bit IO address
        // space.
        assert(!(IOPort & ~0xFFFF));
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    Addr paddr = req->getPaddr();

    AddrRange m5opRange(0xFFFF0000, 0xFFFFFFFF);

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR |
                      Request::STRICT_ORDER);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    } else if (FullSystem) {
        // Check for an access to the local APIC
        LocalApicBase localApicBase =
            tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
        AddrRange apicRange(localApicBase.base * PageBytes,
                            (localApicBase.base + 1) * PageBytes - 1);

        if (apicRange.contains(paddr)) {
            // The Intel developer's manuals say the below restrictions apply,
            // but the Linux kernel, because of a compiler optimization,
            // breaks them.
            /*
            // Check alignment
            if (paddr & ((32/8) - 1))
                return new GeneralProtection(0);
            // Check access size
            if (req->getSize() != (32/8))
                return new GeneralProtection(0);
            */
            // Force the access to be uncacheable.
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                        paddr - apicRange.start()));
        }
    }

    return NoFault;
}

Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    Request::Flags flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
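            // TSG, the IDTR, HS, and LS are exempted below, presumably
            // because they have no architectural selector to validate: the
            // descriptor tables are loaded directly, and HS/LS are internal
            // pseudo-segments.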
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                        seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                    && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return std::make_shared<GeneralProtection>(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return std::make_shared<GeneralProtection>(0);
                if (!attr.readable && mode == Read)
                    return std::make_shared<GeneralProtection>(0);
                expandDown = attr.expandDown;
            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            unsigned logSize = sizeOverride ? (unsigned)m5Reg.altAddr
                                            : (unsigned)m5Reg.defAddr;
            int size = (1 << logSize) * 8;
            Addr offset = bits(vaddr - base, size - 1, 0);
            Addr endOffset = offset + req->getSize() - 1;
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                if (offset <= limit || endOffset <= limit)
                    return std::make_shared<GeneralProtection>(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return std::make_shared<GeneralProtection>(0);
            }
        }
        if (m5Reg.submode != SixtyFourBitMode ||
                (flags & (AddrSizeFlagBit << FlagShift)))
            vaddr &= mask(32);
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (!entry) {
                if (FullSystem) {
                    Fault fault = walker->start(tc, translation, req, mode);
                    if (timing || fault != NoFault) {
                        // This gets ignored in atomic mode.
                        delayedResponse = true;
                        return fault;
                    }
                    entry = lookup(vaddr);
                    assert(entry);
                } else {
                    DPRINTF(TLB, "Handling a TLB miss for "
                            "address %#x at pc %#x.\n",
                            vaddr, tc->instAddr());

                    Process *p = tc->getProcessPtr();
                    TlbEntry newEntry;
                    bool success = p->pTable->lookup(vaddr, newEntry);
                    if (!success && mode != Execute) {
                        // Check if we just need to grow the stack.
                        if (p->fixupStackFault(vaddr)) {
                            // If we did, look up the entry for the new page.
                            success = p->pTable->lookup(vaddr, newEntry);
                        }
                    }
                    if (!success) {
                        return std::make_shared<PageFault>(vaddr, true, mode,
                                                           true, false);
                    } else {
                        Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                        DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                                newEntry.pageStart());
                        entry = insert(alignedVaddr, newEntry);
                    }
                    DPRINTF(TLB, "Miss was serviced.\n");
                }
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
            bool inUser = (m5Reg.cpl == 3 &&
                    !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
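                // The PageFault arguments appear to mirror the bits of the
                // x86 error code: present = true, the faulting access mode,
                // user vs. supervisor, and reserved-bit violation = false.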
                return std::make_shared<PageFault>(vaddr, true, mode, inUser,
                                                   false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return std::make_shared<PageFault>(vaddr, true, Write, inUser,
                                                   false);
            }

            Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Walker *
TLB::getWalker()
{
    return walker;
}

void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    // Don't allow restoring into a smaller TLB.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

BaseMasterPort *
TLB::getMasterPort()
{
    return &walker->getMasterPort("port");
}

} // namespace X86ISA

X86ISA::TLB *
X86TLBParams::create()
{
    return new X86ISA::TLB(this);
}