tlb.cc (12461:a4cb506cda74 -> 12749:223c83ed9979)
/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#include "arch/x86/tlb.hh"

#include <cstring>
#include <memory>

#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/x86_traits.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

namespace X86ISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), configAddress(0), size(p->size),
      tlb(size), lruSeq(0)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p->walker;
    walker->setTLB(this);
}

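// Replacement policy note: every hit and every insert stamps an entry with
// a fresh value from nextSeq(), so the entry with the smallest lruSeq is
// the least recently touched one. A sketch with hypothetical sequence
// values:
//
//     lookup(a); lookup(b); lookup(a);   // lruSeq: a == 3, b == 2
//     evictLRU();                        // frees b, the minimum
//
// This trades an O(size) scan on eviction for O(1) work on each hit.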
void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    unsigned lru = 0;
    for (unsigned i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    assert(tlb[lru].trieHandle);
    trie.remove(tlb[lru].trieHandle);
    tlb[lru].trieHandle = NULL;
    freeList.push_back(&tlb[lru]);
}

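// Entries live in a trie keyed by virtual address, inserted with
// MaxBits - logBytes significant bits, so a larger page occupies a
// shallower node and matches every address within it. Illustratively,
// for standard x86 page sizes:
//
//     4KB page: logBytes == 12  ->  MaxBits - 12 key bits
//     2MB page: logBytes == 21  ->  MaxBits - 21 key bits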
TlbEntry *
TLB::insert(Addr vpn, const TlbEntry &entry)
{
    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = trie.lookup(vpn);
    if (newEntry) {
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
        trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry && update_lru)
        entry->lruSeq = nextSeq();
    return entry;
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

void
TLB::flushNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non-global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::demapPage(Addr va, uint64_t asn)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry) {
        trie.remove(entry->trieHandle);
        entry->trieHandle = NULL;
        freeList.push_back(entry);
    }
}

Fault
TLB::translateInt(const RequestPtr &req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return std::make_shared<GeneralProtection>(0);

        // The index is multiplied by the size of a MiscReg so that
        // any memory dependence calculations will not see these as
        // overlapping.
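        // Illustratively (hypothetical index): a MiscReg index of 3 maps
        // to paddr 3 * sizeof(MiscReg), i.e. 24 for 8-byte registers.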
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16 bit IO address
        // space.
        assert(!(IOPort & ~0xFFFF));
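        // Legacy PCI configuration mechanism #1: a 32-bit write to port
        // 0xCF8 sets CONFIG_ADDRESS (bit 31 enables the mechanism), and
        // ports 0xCFC-0xCFF form the CONFIG_DATA window. Below, bits 30:2
        // of CONFIG_ADDRESS plus the low two port bits are forwarded into
        // the PCI config physical address space.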
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    Addr paddr = req->getPaddr();

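    // Addresses in [0xFFFF0000, 0xFFFFFFFF] are gem5 pseudo-instruction
    // ("m5op") accesses; bits 15:8 of the paddr select the function and
    // bits 7:0 the subfunction, as decoded below.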
    AddrRange m5opRange(0xFFFF0000, 0xFFFFFFFF);

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR |
                      Request::STRICT_ORDER);
        req->setPaddr(GenericISA::iprAddressPseudoInst((paddr >> 8) & 0xFF,
                                                       paddr & 0xFF));
    } else if (FullSystem) {
        // Check for an access to the local APIC
        LocalApicBase localApicBase =
            tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
        AddrRange apicRange(localApicBase.base * PageBytes,
                            (localApicBase.base + 1) * PageBytes - 1);

        if (apicRange.contains(paddr)) {
            // The Intel developer's manuals say the below restrictions
            // apply, but the Linux kernel, because of a compiler
            // optimization, breaks them.
            /*
            // Check alignment
            if (paddr & ((32/8) - 1))
                return new GeneralProtection(0);
            // Check access size
            if (req->getSize() != (32/8))
                return new GeneralProtection(0);
            */
            // Force the access to be uncacheable.
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                                              paddr - apicRange.start()));
        }
    }

    return NoFault;
}

Fault
TLB::translate(const RequestPtr &req,
        ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    Request::Flags flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks.
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                  seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return std::make_shared<GeneralProtection>(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return std::make_shared<GeneralProtection>(0);
                if (!attr.readable && mode == Read)
                    return std::make_shared<GeneralProtection>(0);
                expandDown = attr.expandDown;
            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            unsigned logSize = sizeOverride ? (unsigned)m5Reg.altAddr
                                            : (unsigned)m5Reg.defAddr;
            int size = (1 << logSize) * 8;
            Addr offset = bits(vaddr - base, size - 1, 0);
            Addr endOffset = offset + req->getSize() - 1;
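            // For a normal segment, valid offsets are [0, limit]; for an
            // expand-down segment (e.g. a downward-growing stack) the
            // sense inverts and valid offsets lie strictly above the
            // limit, hence the flipped comparisons below.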
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                if (offset <= limit || endOffset <= limit)
                    return std::make_shared<GeneralProtection>(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return std::make_shared<GeneralProtection>(0);
            }
        }
        if (m5Reg.submode != SixtyFourBitMode ||
                (flags & (AddrSizeFlagBit << FlagShift)))
            vaddr &= mask(32);
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (mode == Read) {
                rdAccesses++;
            } else {
                wrAccesses++;
            }
            if (!entry) {
                DPRINTF(TLB, "Handling a TLB miss for "
                        "address %#x at pc %#x.\n",
                        vaddr, tc->instAddr());
                if (mode == Read) {
                    rdMisses++;
                } else {
                    wrMisses++;
                }
                if (FullSystem) {
                    Fault fault = walker->start(tc, translation, req, mode);
                    if (timing || fault != NoFault) {
                        // This gets ignored in atomic mode.
                        delayedResponse = true;
                        return fault;
                    }
                    entry = lookup(vaddr);
                    assert(entry);
                } else {
                    Process *p = tc->getProcessPtr();
                    const EmulationPageTable::Entry *pte =
                        p->pTable->lookup(vaddr);
                    if (!pte && mode != Execute) {
                        // Check if we just need to grow the stack.
                        if (p->fixupStackFault(vaddr)) {
                            // If we did, look up the entry for the new page.
                            pte = p->pTable->lookup(vaddr);
                        }
                    }
                    if (!pte) {
                        return std::make_shared<PageFault>(vaddr, true, mode,
                                                           true, false);
                    } else {
                        Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                        DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                                pte->paddr);
                        entry = insert(alignedVaddr, TlbEntry(
                                p->pTable->pid(), alignedVaddr, pte->paddr,
                                pte->flags & EmulationPageTable::Uncacheable,
                                pte->flags & EmulationPageTable::ReadOnly));
                    }
                    DPRINTF(TLB, "Miss was serviced.\n");
                }
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
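            // CR0.WP semantics: with WP clear, supervisor writes may
            // ignore the read-only bit; with WP set (or from user mode)
            // a write to a non-writable page faults, which is what
            // badWrite captures below.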
            bool inUser = (m5Reg.cpl == 3 &&
                           !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
                return std::make_shared<PageFault>(vaddr, true, mode, inUser,
                                                   false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return std::make_shared<PageFault>(vaddr, true, Write, inUser,
                                                   false);
            }

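            // Merge the physical frame with the page offset: the low
            // logBytes bits of vaddr pass through unchanged (12 bits for
            // a 4KB page, 21 for a 2MB page).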
            Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
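    // If the response was delayed (a timing-mode walk is in flight), the
    // walker, not this function, is expected to call translation->finish()
    // once the walk completes.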
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}

Walker *
TLB::getWalker()
{
    return walker;
}

void
TLB::regStats()
{
    using namespace Stats;

    rdAccesses
        .name(name() + ".rdAccesses")
        .desc("TLB accesses on read requests");

    wrAccesses
        .name(name() + ".wrAccesses")
        .desc("TLB accesses on write requests");

    rdMisses
        .name(name() + ".rdMisses")
        .desc("TLB misses on read requests");

    wrMisses
        .name(name() + ".wrMisses")
        .desc("TLB misses on write requests");
}

void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

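// Restoring rebuilds each entry's trie handle from its unserialized vaddr
// and logBytes; a checkpoint can therefore hold at most `size` entries.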
void
TLB::unserialize(CheckpointIn &cp)
{
    // Don't allow restoring into a smaller TLB.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

BaseMasterPort *
TLB::getMasterPort()
{
    return &walker->getMasterPort("port");
}

} // namespace X86ISA

X86ISA::TLB *
X86TLBParams::create()
{
    return new X86ISA::TLB(this);
}