tlb.cc (6025:044903442dcb)
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/alpha/faults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
42#include "config/alpha_tlaser.hh"
43#include "cpu/thread_context.hh"
44
45using namespace std;
46
47namespace AlphaISA {
48
49///////////////////////////////////////////////////////////////////////
50//
51// Alpha TLB
52//
53
54#ifdef DEBUG
55bool uncacheBit39 = false;
56bool uncacheBit40 = false;
57#endif
58
59#define MODE2MASK(X) (1 << (X))
60
61TLB::TLB(const Params *p)
62 : BaseTLB(p), size(p->size), nlu(0)
63{
64 table = new TlbEntry[size];
65 memset(table, 0, sizeof(TlbEntry[size]));
66 flushCache();
67}
68
69TLB::~TLB()
70{
71 if (table)
72 delete [] table;
73}
74
75void
76TLB::regStats()
77{
78 fetch_hits
79 .name(name() + ".fetch_hits")
80 .desc("ITB hits");
81 fetch_misses
82 .name(name() + ".fetch_misses")
83 .desc("ITB misses");
84 fetch_acv
85 .name(name() + ".fetch_acv")
86 .desc("ITB acv");
87 fetch_accesses
88 .name(name() + ".fetch_accesses")
89 .desc("ITB accesses");
90
91 fetch_accesses = fetch_hits + fetch_misses;
92
93 read_hits
94 .name(name() + ".read_hits")
95 .desc("DTB read hits")
96 ;
97
98 read_misses
99 .name(name() + ".read_misses")
100 .desc("DTB read misses")
101 ;
102
103 read_acv
104 .name(name() + ".read_acv")
105 .desc("DTB read access violations")
106 ;
107
108 read_accesses
109 .name(name() + ".read_accesses")
110 .desc("DTB read accesses")
111 ;
112
113 write_hits
114 .name(name() + ".write_hits")
115 .desc("DTB write hits")
116 ;
117
118 write_misses
119 .name(name() + ".write_misses")
120 .desc("DTB write misses")
121 ;
122
123 write_acv
124 .name(name() + ".write_acv")
125 .desc("DTB write access violations")
126 ;
127
128 write_accesses
129 .name(name() + ".write_accesses")
130 .desc("DTB write accesses")
131 ;
132
133 data_hits
134 .name(name() + ".data_hits")
135 .desc("DTB hits")
136 ;
137
138 data_misses
139 .name(name() + ".data_misses")
140 .desc("DTB misses")
141 ;
142
143 data_acv
144 .name(name() + ".data_acv")
145 .desc("DTB access violations")
146 ;
147
148 data_accesses
149 .name(name() + ".data_accesses")
150 .desc("DTB accesses")
151 ;
152
153 data_hits = read_hits + write_hits;
154 data_misses = read_misses + write_misses;
155 data_acv = read_acv + write_acv;
156 data_accesses = read_accesses + write_accesses;
157}
158
159// look up an entry in the TLB
160TlbEntry *
161TLB::lookup(Addr vpn, uint8_t asn)
162{
163 // assume not found...
164 TlbEntry *retval = NULL;
165
166 if (EntryCache[0]) {
167 if (vpn == EntryCache[0]->tag &&
168 (EntryCache[0]->asma || EntryCache[0]->asn == asn))
169 retval = EntryCache[0];
170 else if (EntryCache[1]) {
171 if (vpn == EntryCache[1]->tag &&
172 (EntryCache[1]->asma || EntryCache[1]->asn == asn))
173 retval = EntryCache[1];
174 else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
175 (EntryCache[2]->asma || EntryCache[2]->asn == asn))
176 retval = EntryCache[2];
177 }
178 }
179
180 if (retval == NULL) {
181 PageTable::const_iterator i = lookupTable.find(vpn);
182 if (i != lookupTable.end()) {
183 while (i->first == vpn) {
184 int index = i->second;
185 TlbEntry *entry = &table[index];
186 assert(entry->valid);
187 if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
188 retval = updateCache(entry);
189 break;
190 }
191
192 ++i;
193 }
194 }
195 }
196
197 DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
198 retval ? "hit" : "miss", retval ? retval->ppn : 0);
199 return retval;
200}
201
202Fault
203TLB::checkCacheability(RequestPtr &req, bool itb)
204{
205 // in Alpha, cacheability is controlled by upper-level bits of the
206 // physical address
207
208 /*
209 * We support having the uncacheable bit in either bit 39 or bit
210 * 40. The Turbolaser platform (and EV5) support having the bit
211 * in 39, but Tsunami (which Linux assumes uses an EV6) generates
212 * accesses with the bit in 40. So we must check for both, but we
213 * have debug flags to catch a weird case where both are used,
214 * which shouldn't happen.
215 */
216
217
42#include "cpu/thread_context.hh"
43
44using namespace std;
45
46namespace AlphaISA {
47
48///////////////////////////////////////////////////////////////////////
49//
50// Alpha TLB
51//
52
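// Debug flags referenced by the comment in checkCacheability() below;
// they were meant to catch the unexpected case where both uncacheable
// address bits show up in one run, and appear vestigial now that only
// the bit-43 check remains.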
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

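// Convert a mode_type value (kernel/executive/supervisor/user) into a
// one-hot mask for testing the per-mode xre/xwe protection bits of a
// TlbEntry.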
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB access violations");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits");
    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses");
    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations");
    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses");
    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits");
    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses");
    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations");
    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses");
    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits");
    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses");
    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations");
    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses");

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
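// Lookup is two-level: probe the small most-recently-used EntryCache
// first, then fall back to the lookupTable hash that maps a VPN to
// candidate indices in the main table. An entry matches when its tag
// equals the VPN and it is either address-space-match (asma) or its
// ASN equals the requesting ASN.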
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

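    // With Turbolaser support gone from this revision, only the
    // EV6-style check remains: a physical address with
    // PAddrUncachedBit43 set selects uncacheable I/O space.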
    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in
        // Alpha, as we don't have a ROM and we don't want to try to fetch
        // from a device register, since we'd destroy any data that is
        // clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}


// insert a new TLB entry
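// The victim slot is chosen round-robin via the nlu pointer; if the slot
// already holds a valid entry, its mapping must first be erased from
// lookupTable so the hash table stays consistent with the entry array.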
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

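// Invalidate every entry that is not address-space-match (asma), i.e.
// drop all process-private translations while keeping global ones.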
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

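// Checkpointing: besides the scalars, each TLB entry is written to its
// own checkpoint section, named "<tlb name>.Entry<i>".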
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // If this is a PAL pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
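            // (0xf0000000000 fills bits <43:40> with ones; 0xffffffffff
            // keeps only bits <39:0>.)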
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                                     asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);
}

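// Translate a data access: check alignment, then either use the physical
// address directly, match an EV6 superpage, or consult the TLB; the
// MM_STAT-style flag masks passed to each fault record what went wrong.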
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
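    // e.g. a 4-byte access to vaddr 0x1002 has (0x1002 & 0x3) != 0 and
    // faults; the check assumes the access size is a power of two.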
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // declare a write access violation fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

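// Return the entry at the current replacement slot (nlu), optionally
// advancing the round-robin pointer; presumably used when PALcode reads
// or writes TLB entries through the IPR interface.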
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

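// Timing-mode translation is implemented on top of the atomic path: the
// translation completes immediately and no TLB-miss latency is modeled.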
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

} // end namespace AlphaISA

AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}