tlb.cc: changes between revision 2665:a124942bacb8 and revision 2680:246e7104f744 (lines removed in 2680 are prefixed with '-', lines added with '+')
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/tlb.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
41#include "cpu/exec_context.hh"
41#include "cpu/thread_context.hh"
#include "sim/builder.hh"

using namespace std;
using namespace EV5;

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

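// MODE2MASK turns a processor mode value (AlphaISA::mode_type) into a
// one-hot mask that is tested against the per-mode read/write enable bits
// (xre/xwe) of a PTE in the translate() methods below.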
#define MODE2MASK(X) (1 << (X))

AlphaTLB::AlphaTLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new AlphaISA::PTE[size];
    memset(table, 0, sizeof(AlphaISA::PTE[size]));
}

AlphaTLB::~AlphaTLB()
{
    if (table)
        delete [] table;
}

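// lookupTable maps a virtual page number to an index into table[]; several
// entries may share a VPN (one per address space number), so a lookup walks
// all entries with a matching key until the tag and the ASN (or the
// address-space-match bit) agree.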
// look up an entry in the TLB
AlphaISA::PTE *
AlphaTLB::lookup(Addr vpn, uint8_t asn) const
{
    // assume not found...
    AlphaISA::PTE *retval = NULL;

    PageTable::const_iterator i = lookupTable.find(vpn);
    if (i != lookupTable.end()) {
        while (i->first == vpn) {
            int index = i->second;
            AlphaISA::PTE *pte = &table[index];
            assert(pte->valid);
            if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
                retval = pte;
                break;
            }

            ++i;
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


Fault
AlphaTLB::checkCacheability(RequestPtr &req)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) support having the bit in 39, but
     * Tsunami (which Linux assumes uses an EV6) generates accesses with
     * the bit in 40.  So we must check for both, but we have debug flags
     * to catch a weird case where both are used, which shouldn't happen.
     */


#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39) {
#else
    if (req->getPaddr() & PAddrUncachedBit43) {
#endif
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(req->getFlags() | UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
    }
    return NoFault;
}


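// New entries are written to the slot indexed by nlu, and nextnlu() then
// advances that index for the next insertion.  Any entry being displaced
// must also have its VPN removed from lookupTable, which is what the search
// at the top of insert() does.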
// insert a new TLB entry
void
AlphaTLB::insert(Addr addr, AlphaISA::PTE &pte)
{
    AlphaISA::VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
AlphaTLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(AlphaISA::PTE[size]));
    lookupTable.clear();
    nlu = 0;
}

void
AlphaTLB::flushProcesses()
{
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
AlphaTLB::flushAddr(Addr addr, uint8_t asn)
{
    AlphaISA::VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i->first == vaddr.vpn()) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry
            pte->valid = false;

            lookupTable.erase(i);
        }

        ++i;
    }
}


void
AlphaTLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

void
AlphaTLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
AlphaITB::AlphaITB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}


void
AlphaITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}


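// Translate an instruction fetch: PAL-mode PCs and explicitly physical
// requests bypass the TLB; kernel-mode superpage addresses map directly to
// physical memory; everything else is looked up in the TLB and checked
// against the execute permission for the current processor mode.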
Fault
-AlphaITB::translate(RequestPtr &req, ExecContext *xc) const
+AlphaITB::translate(RequestPtr &req, ThreadContext *tc) const
{
    if (AlphaISA::PcPAL(req->getVaddr())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
-        if ((MCSR_SP(xc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
+        if ((MCSR_SP(tc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2) {
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
#endif
            // only valid in kernel mode
-            if (ICM_CM(xc->readMiscReg(AlphaISA::IPR_ICM)) !=
+            if (ICM_CM(tc->readMiscReg(AlphaISA::IPR_ICM)) !=
                AlphaISA::mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
-            int asn = DTB_ASN_ASN(xc->readMiscReg(AlphaISA::IPR_DTB_ASN));
+            int asn = DTB_ASN_ASN(tc->readMiscReg(AlphaISA::IPR_DTB_ASN));
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->getVaddr()).vpn(),
                                        asn);

            if (!pte) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((pte->ppn << AlphaISA::PageShift) +
                          (AlphaISA::VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(pte->xre &
-                  (1 << ICM_CM(xc->readMiscReg(AlphaISA::IPR_ICM))))) {
+                  (1 << ICM_CM(tc->readMiscReg(AlphaISA::IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);

}

///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
AlphaDTB::AlphaDTB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}

void
AlphaDTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

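// Translate a data access: after an alignment check, requests issued from
// PAL mode may use the alternate processor mode (ALT_MODE) for the
// permission check; physical requests and kernel-mode superpage addresses
// bypass the TLB, and everything else is looked up in the TLB with
// read/write permission and fault-on-read/write checks for the current mode.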
Fault
-AlphaDTB::translate(RequestPtr &req, ExecContext *xc, bool write) const
+AlphaDTB::translate(RequestPtr &req, ThreadContext *tc, bool write) const
{
-    Addr pc = xc->readPC();
+    Addr pc = tc->readPC();

    AlphaISA::mode_type mode =
-        (AlphaISA::mode_type)DTB_CM_CM(xc->readMiscReg(AlphaISA::IPR_DTB_CM));
+        (AlphaISA::mode_type)DTB_CM_CM(tc->readMiscReg(AlphaISA::IPR_DTB_CM));


    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (pc & 0x1) {
        mode = (req->getFlags() & ALTMODE) ?
            (AlphaISA::mode_type)ALT_MODE_AM(
-                xc->readMiscReg(AlphaISA::IPR_ALT_MODE))
+                tc->readMiscReg(AlphaISA::IPR_ALT_MODE))
            : AlphaISA::mode_kernel;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
-        if ((MCSR_SP(xc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
+        if ((MCSR_SP(tc->readMiscReg(AlphaISA::IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2) {
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
#endif

            // only valid in kernel mode
-            if (DTB_CM_CM(xc->readMiscReg(AlphaISA::IPR_DTB_CM)) !=
+            if (DTB_CM_CM(tc->readMiscReg(AlphaISA::IPR_DTB_CM)) !=
                AlphaISA::mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

-            int asn = DTB_ASN_ASN(xc->readMiscReg(AlphaISA::IPR_DTB_ASN));
+            int asn = DTB_ASN_ASN(tc->readMiscReg(AlphaISA::IPR_DTB_ASN));

            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->getVaddr()).vpn(),
                                        asn);

            if (!pte) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((pte->ppn << AlphaISA::PageShift) +
                          AlphaISA::VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // write access permission fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (pte->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (pte->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

AlphaISA::PTE &
AlphaTLB::index(bool advance)
{
    AlphaISA::PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB)

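// The parameter/builder boilerplate below lets the simulator's configuration
// scripts instantiate AlphaITB and AlphaDTB objects, with the TLB size as
// the only parameter (defaulting to 48 and 64 entries respectively).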
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(AlphaITB)


CREATE_SIM_OBJECT(AlphaITB)
{
    return new AlphaITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", AlphaITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(AlphaDTB)


CREATE_SIM_OBJECT(AlphaDTB)
{
    return new AlphaDTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB)