/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#include <cstring>

#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/pagetable.hh"
#include "arch/x86/tlb.hh"
#include "arch/x86/x86_traits.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "config/full_system.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/packet_access.hh"
#include "mem/request.hh"

#if FULL_SYSTEM
#include "arch/x86/pagetable_walker.hh"
#else
#include "mem/page_table.hh"
#include "sim/process.hh"
#endif

namespace X86ISA {

TLB::TLB(const Params *p) : BaseTLB(p), configAddress(0), size(p->size)
{
    tlb = new TlbEntry[size];
    std::memset(tlb, 0, sizeof(TlbEntry) * size);

    for (int x = 0; x < size; x++)
        freeList.push_back(&tlb[x]);

#if FULL_SYSTEM
    walker = p->walker;
    walker->setTLB(this);
#endif
}

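// The TLB is managed as an LRU list: entryList orders live entries from
// most to least recently used, and freeList holds the unused slots of the
// backing array allocated in the constructor.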
TlbEntry *
TLB::insert(Addr vpn, TlbEntry &entry)
{
    //TODO Deal with conflicting entries

    TlbEntry *newEntry = NULL;
    if (!freeList.empty()) {
        newEntry = freeList.front();
        freeList.pop_front();
    } else {
        // All slots are in use; recycle the least recently used entry.
        newEntry = entryList.back();
        entryList.pop_back();
    }
    *newEntry = entry;
    newEntry->vaddr = vpn;
    entryList.push_front(newEntry);
    return newEntry;
}

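// Find a matching entry with a linear walk of the LRU list. An entry
// matches if its [vaddr, vaddr + size) range covers va, which lets pages
// of different sizes share the same list.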
TLB::EntryList::iterator
TLB::lookupIt(Addr va, bool update_lru)
{
    //TODO make this smarter at some point
    EntryList::iterator entry;
    for (entry = entryList.begin(); entry != entryList.end(); entry++) {
        if ((*entry)->vaddr <= va && (*entry)->vaddr + (*entry)->size > va) {
            DPRINTF(TLB, "Matched vaddr %#x to entry starting at %#x "
                    "with size %#x.\n", va, (*entry)->vaddr, (*entry)->size);
            if (update_lru) {
                // Move the matching entry to the front (MRU) position.
                entryList.push_front(*entry);
                entryList.erase(entry);
                entry = entryList.begin();
            }
            break;
        }
    }
    return entry;
}

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    EntryList::iterator entry = lookupIt(va, update_lru);
    if (entry == entryList.end())
        return NULL;
    else
        return *entry;
}

void
TLB::invalidateAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    while (!entryList.empty()) {
        TlbEntry *entry = entryList.front();
        entryList.pop_front();
        freeList.push_back(entry);
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

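// Flush all entries except those marked global. This mirrors what a CR3
// write does on real x86 hardware when CR4.PGE is set: global mappings
// survive the address space switch.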
void
TLB::invalidateNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non-global entries.\n");
    EntryList::iterator entryIt;
    for (entryIt = entryList.begin(); entryIt != entryList.end();) {
        if (!(*entryIt)->global) {
            freeList.push_back(*entryIt);
            entryList.erase(entryIt++);
        } else {
            entryIt++;
        }
    }
}

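// Note: the asn (address space number) argument is part of the generic
// demap interface but is ignored here; this TLB does not tag entries with
// an address space identifier.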
void
TLB::demapPage(Addr va, uint64_t asn)
{
    EntryList::iterator entry = lookupIt(va, false);
    if (entry != entryList.end()) {
        freeList.push_back(*entry);
        entryList.erase(entry);
    }
}

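// Translate an access to gem5's "internal" address space, which routes
// non-memory destinations (CPUID, MSRs, legacy I/O ports) through the
// memory system. The prefix extracted below selects the sub-space; the
// IntAddrPrefix*/PhysAddrPrefix* constants are defined in x86_traits.hh.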
Fault
TLB::translateInt(RequestPtr req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return new GeneralProtection(0);

        //The index is multiplied by the size of a MiscReg so that
        //any memory dependence calculations will not see these as
        //overlapping.
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16-bit I/O address
        // space.
        assert(!(IOPort & ~0xFFFF));
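        // Ports 0xCF8 and 0xCFC-0xCFF implement PCI configuration
        // mechanism #1: a 32-bit write to CONFIG_ADDRESS (0xCF8) latches a
        // bus/device/function/register selector, and accesses to
        // CONFIG_DATA (0xCFC) reach that register when bit 31 of the
        // latched value (the enable bit) is set.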
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE);
            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
}

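// Top-level translation: route internal-space accesses to translateInt(),
// apply segmentation checks outside of long mode, translate through the
// TLB (walking page tables or the process page table on a miss) when
// paging is on, and finally redirect anything that hits the local APIC's
// page.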
Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    uint32_t flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks.
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                    seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                    && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return new GeneralProtection(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return new GeneralProtection(0);
                if (!attr.readable && mode == Read)
                    return new GeneralProtection(0);
                expandDown = attr.expandDown;
            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            // This assumes we're not in 64-bit mode. If we were, the
            // default address size is 64 bits, overridable to 32.
            int size = 32;
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            SegAttr csAttr = tc->readMiscRegNoEffect(MISCREG_CS_ATTR);
            if ((csAttr.defaultSize && sizeOverride) ||
                    (!csAttr.defaultSize && !sizeOverride))
                size = 16;
            Addr offset = bits(vaddr - base, size-1, 0);
            Addr endOffset = offset + req->getSize() - 1;
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                // In an expand-down segment, valid offsets lie *above* the
                // limit, so an offset at or below it is out of bounds.
                if (offset <= limit || endOffset <= limit)
                    return new GeneralProtection(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return new GeneralProtection(0);
            }
        }
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (!entry) {
#if FULL_SYSTEM
                // In full-system mode, hand the miss to the hardware page
                // table walker.
                Fault fault = walker->start(tc, translation, req, mode);
                if (timing || fault != NoFault) {
                    // This gets ignored in atomic mode.
                    delayedResponse = true;
                    return fault;
                }
                entry = lookup(vaddr);
                assert(entry);
#else
                // In syscall-emulation mode, consult the process's
                // functional page table instead.
                DPRINTF(TLB, "Handling a TLB miss for "
                        "address %#x at pc %#x.\n",
                        vaddr, tc->instAddr());

                Process *p = tc->getProcessPtr();
                TlbEntry newEntry;
                bool success = p->pTable->lookup(vaddr, newEntry);
                if (!success && mode != Execute) {
                    // Check if we just need to grow the stack.
                    if (p->fixupStackFault(vaddr)) {
                        // If we did, look up the entry for the new page.
                        success = p->pTable->lookup(vaddr, newEntry);
                    }
                }
                if (!success) {
                    return new PageFault(vaddr, true, mode, true, false);
                } else {
                    Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                    DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                            newEntry.pageStart());
                    entry = insert(alignedVaddr, newEntry);
                }
                DPRINTF(TLB, "Miss was serviced.\n");
#endif
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
            bool inUser = (m5Reg.cpl == 3 &&
                    !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
                return new PageFault(vaddr, true, mode, inUser, false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return new PageFault(vaddr, true, Write, inUser, false);
            }

            // Splice the page offset into the frame address; entry->size
            // is a power of two, so size-1 keeps just the in-page bits.
            Addr paddr = entry->paddr | (vaddr & (entry->size-1));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }
    // Check for an access to the local APIC.
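    // The APIC's registers occupy a single physical page whose base comes
    // from the APIC base MSR; matching accesses are rerouted below to a
    // per-context address computed by x86LocalAPICAddress(), so each
    // hardware context reaches its own local APIC.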
#if FULL_SYSTEM
    LocalApicBase localApicBase = tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
    Addr baseAddr = localApicBase.base * PageBytes;
    Addr paddr = req->getPaddr();
    if (baseAddr <= paddr && baseAddr + PageBytes > paddr) {
        // The Intel developer's manuals say the below restrictions apply,
        // but the Linux kernel, because of a compiler optimization, breaks
        // them.
        /*
        // Check alignment
        if (paddr & ((32/8) - 1))
            return new GeneralProtection(0);
        // Check access size
        if (req->getSize() != (32/8))
            return new GeneralProtection(0);
        */
        // Force the access to be uncacheable.
        req->setFlags(Request::UNCACHEABLE);
        req->setPaddr(x86LocalAPICAddress(tc->contextId(), paddr - baseAddr));
    }
#endif
    return NoFault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    // Atomic-mode translations always complete inline; the delayed
    // response flag is required by translate() but ignored here.
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}

#if FULL_SYSTEM

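// These MMU register access handlers are stubs: each charges a single CPU
// cycle of latency and performs no actual register access.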
Tick
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    return tc->getCpuPtr()->ticks(1);
}

Tick
TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
    return tc->getCpuPtr()->ticks(1);
}

Walker *
TLB::getWalker()
{
    return walker;
}

433
434void
435TLB::serialize(std::ostream &os)
436{
437}
438
439void
440TLB::unserialize(Checkpoint *cp, const std::string &section)
441{
442}
443
444} // namespace X86ISA
445
446X86ISA::TLB *
447X86TLBParams::create()
448{
449 return new X86ISA::TLB(this);
450}