tlb.cc (3633:524f2aadbc89 → 3838:3d0e174a9d2b)
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
--- 34 unchanged lines hidden ---

#include "cpu/thread_context.hh"
#include "sim/builder.hh"

using namespace std;
using namespace EV5;

namespace AlphaISA
{
///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))

TLB::TLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new PTE[size];
    memset(table, 0, sizeof(PTE[size]));
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

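// From its use here and in insert()/flushAddr() below, lookupTable is a
// multimap keyed by virtual page number (VPN) and mapping to indices into
// table[]: several entries may share a VPN, and lookup() walks all of them
// until one matches the requested ASN or has its address-space-match bit
// (pte->asma) set, meaning it is valid for every ASN.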
// look up an entry in the TLB
PTE *
TLB::lookup(Addr vpn, uint8_t asn) const
{
    // assume not found...
    PTE *retval = NULL;

    PageTable::const_iterator i = lookupTable.find(vpn);
    if (i != lookupTable.end()) {
        while (i->first == vpn) {
            int index = i->second;
            PTE *pte = &table[index];
            assert(pte->valid);
            if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
                retval = pte;
                break;
            }

            ++i;
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

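// A note on the magic bit positions used below (assuming the
// PAddrUncachedBit* constants really are those single bits): bit 39 is
// ULL(1) << 39 = 0x8000000000, bit 40 is 0x10000000000, and bit 43 is
// 0x80000000000.  The ALPHA_TLASER build tests bit 39 here, the
// Tsunami/EV6 build tests bit 43, and the translate() routines further
// down also look at bit 40 when sign-extending superpage addresses.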
Fault
TLB::checkCacheability(RequestPtr &req)
{
// in Alpha, cacheability is controlled by upper-level bits of the
// physical address

/*
 * We support having the uncacheable bit in either bit 39 or bit 40.
 * The Turbolaser platform (and EV5) support having the bit in 39, but
 * Tsunami (which Linux assumes uses an EV6) generates accesses with
 * the bit in 40. So we must check for both, but we have debug flags
 * to catch a weird case where both are used, which shouldn't happen.
 */

#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39) {
#else
    if (req->getPaddr() & PAddrUncachedBit43) {
#endif
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(req->getFlags() | UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
    }
    return NoFault;
}

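// nlu names the next table[] slot to be (re)used; nextnlu(), which lives
// with the class declaration rather than here, advances it after every
// insertion and optionally in index() below.  When a still-valid victim is
// about to be overwritten, insert() first removes its stale lookupTable
// entry for the old VPN so that lookup() can never return it again.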
// insert a new TLB entry
void
TLB::insert(Addr addr, PTE &pte)
{
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

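// Three flush flavours follow: flushAll() wipes the whole table,
// flushProcesses() invalidates every entry whose address-space-match bit
// is clear (i.e. everything private to a particular ASN), and flushAddr()
// invalidates only entries whose VPN matches and whose ASN matches or
// whose ASM bit is set.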
void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(PTE[size]));
    lookupTable.clear();
    nlu = 0;
}

void
TLB::flushProcesses()
{
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        PTE *pte = &table[index];
        assert(pte->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i->first == vaddr.vpn()) {
        int index = i->second;
        PTE *pte = &table[index];
        assert(pte->valid);

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry
            pte->valid = false;

            lookupTable.erase(i);
        }

        ++i;
    }
}

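// Checkpointing: each PTE gets its own checkpoint section named
// "<tlb name>.PTE<i>"; unserialize() reads the entries back and rebuilds
// lookupTable from whichever of them are still marked valid.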
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

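// In both regStats() implementations below, the aggregate counters are
// evidently formula stats: ITB::accesses is bound to hits + misses, and
// the DTB's hits/misses/acv/accesses are each the sum of their read and
// write halves, so only the fine-grained counters are ever bumped directly
// in translate().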
///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
ITB::ITB(const std::string &name, int size)
    : TLB(name, size)
{}

void
ITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}

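// ITB::translate() has four outcomes: PAL-mode PCs bypass translation
// (the PAL marker bits are stripped and the address is masked with
// PAddrImplMask), PHYSICAL requests pass straight through, kernel-mode
// superpage addresses are mapped directly onto physical addresses, and
// everything else takes a TLB lookup with an ASN match and a per-mode
// permission check on pte->xre.  Bad or unmapped addresses come back as
// ItbAcvFault or ItbPageFault.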
Fault
ITB::translate(RequestPtr &req, ThreadContext *tc) const
{
    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscReg(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2) {
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
#endif
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscReg(IPR_ICM)) != mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscReg(IPR_DTB_ASN));
            PTE *pte = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!pte) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((pte->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(pte->xre &
                  (1 << ICM_CM(tc->readMiscReg(IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
DTB::DTB(const std::string &name, int size)
    : TLB(name, size)
{}

void
DTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

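// DTB::translate() mirrors the ITB path but separates reads from writes
// for the statistics and checks pte->xwe / pte->xre along with the
// fault-on-write / fault-on-read (fonw/fonr) bits.  The uint64_t flags
// built from the MM_STAT_*_MASK values travel with each fault object,
// presumably so the fault handler can fill in the MM_STAT IPR later.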
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write) const
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscReg(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscReg(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscReg(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2) {
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
#endif
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscReg(IPR_DTB_CM)) != mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscReg(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            PTE *pte = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!pte) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((pte->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // declare the instruction access fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (pte->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (pte->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            }

            if (write)
                write_hits++;
            else
                read_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

PTE &
TLB::index(bool advance)
{
    PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

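// SimObject plumbing: DEFINE_SIM_OBJECT_CLASS_NAME gives the TLB base
// class its object name, and the parameter/creation macros below register
// "AlphaITB" and "AlphaDTB" as constructible objects whose single "size"
// parameter defaults to 48 and 64 entries respectively.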
DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", TLB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(ITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(ITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(ITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(ITB)

CREATE_SIM_OBJECT(ITB)
{
    return new ITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", ITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(DTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(DTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(DTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(DTB)

CREATE_SIM_OBJECT(DTB)
{
    return new DTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", DTB)
}