tlb.cc (10905:a6ca6831e775)
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include "arch/alpha/tlb.hh"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "sim/full_system.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

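// MODE2MASK converts a privilege mode number into a one-hot mask that can
// be tested against a TLB entry's per-mode protection bit vectors, as in
// (entry->xre & MODE2MASK(mode)). For example, assuming mode_kernel == 0
// as defined in the Alpha headers, MODE2MASK(mode_kernel) evaluates to 0x1.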
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), table(p->size), nlu(0)
{
    flushCache();
}

TLB::~TLB()
{
}

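// Register ITB/DTB statistics. The *_accesses and data_* entries are
// assigned expressions over the scalar counters below, so they are
// derived at stat-dump time rather than incremented directly.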
void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB acv");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
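// EntryCache is a small most-recently-used filter of three entry pointers
// consulted before the full lookupTable search; updateCache() promotes an
// entry found by the slower walk. A hit requires a matching VPN tag plus
// either an ASN match or the address-space-match (asma) bit being set.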
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            // guard against walking past the end of the multimap when the
            // matching range extends to the last element
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

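// Decide whether a physical access must bypass the cache. Although the
// comment below discusses uncacheable bits 39 and 40, the check as
// implemented keys off bit 43 of the EV6-style physical address.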
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40. The Turbolaser platform (and EV5) supports having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40. So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return std::make_shared<UnimpFault>(
                "IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in Alpha
        // as we don't have a ROM and we don't want to try to fetch from a
        // device register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return std::make_shared<UnimpFault>(
                "CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}

// insert a new TLB entry
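// Replacement is round-robin: the victim slot is table[nlu] and nextnlu()
// advances the pointer after each insertion. Any valid entry already in
// that slot must first be unlinked from lookupTable, which maps virtual
// page numbers to table indices.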
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    std::fill(table.begin(), table.end(), TlbEntry());
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

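// Checkpointing: the table size and round-robin pointer are stored as
// scalars and each TlbEntry gets its own "Entry%d" subsection. On restore,
// unserialize() rebuilds lookupTable from the valid entries instead of
// checkpointing the lookup structure itself.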
void
TLB::serialize(CheckpointOut &cp) const
{
    const unsigned size(table.size());
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("Entry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    unsigned size(0);
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    table.resize(size);
    for (int i = 0; i < size; i++) {
        table[i].unserializeSection(cp, csprintf("Entry%d", i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

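// Instruction translation. PAL-mode PCs are physical by definition and
// bypass the TLB entirely; EV6 superpage addresses (VA<47:41> == 0x7e) map
// directly to physical in kernel mode; anything else takes an ITB lookup
// tagged with the current address space number.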
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // If this is a PAL PC, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return std::make_shared<ItbAcvFault>(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                                     asn);

            if (!entry) {
                fetch_misses++;
                return std::make_shared<ItbPageFault>(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req, true);
}

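// Data translation. After the alignment check, PAL code may override the
// effective mode via IPR_ALT_MODE; the same EV6 superpage shortcut applies,
// and misses/violations are reported through the MM_STAT_* flag masks
// carried by the DTB faults.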
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
                                                   req->getFlags(),
                                                   flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & AlphaRequestFlags::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                  req->getFlags(),
                                                  flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                     req->getFlags(),
                                                     flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & AlphaRequestFlags::VPTE) ?
                    (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags)) :
                    (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write access violation
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                         req->getFlags(),
                                                         flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req);
}

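// Return the entry at the round-robin pointer, optionally advancing it;
// presumably this serves the PALcode/IPR interface for walking TLB slots.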
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

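// The Alpha TLB never defers a translation: the timing interface simply
// runs the atomic path and reports the result to the Translation object
// immediately.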
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

} // namespace AlphaISA

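// Factory hook invoked by the generated AlphaTLBParams SimObject machinery
// to construct the TLB from its Python configuration.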
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}