tlb.cc: revision 4465:70123ac99284 vs. revision 4762:c94e103c83ad
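Between these two revisions only the object-construction plumbing changes: the include of "sim/builder.hh" and the SimObject builder macros at the end of the file are replaced by the "params/AlphaITB.hh" / "params/AlphaDTB.hh" headers and the AlphaITBParams::create() / AlphaDTBParams::create() functions. The TLB, ITB, and DTB logic itself is identical in both revisions.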
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/alpha/faults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
#include "cpu/thread_context.hh"
// revision 4465 only:
#include "sim/builder.hh"
// revision 4762 only:
#include "params/AlphaDTB.hh"
#include "params/AlphaITB.hh"

using namespace std;
using namespace EV5;

namespace AlphaISA {
///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

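// MODE2MASK turns a processor mode number into a one-hot mask that can be
// tested against the per-mode read/write-enable bit vectors (xre/xwe)
// stored in each PTE.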
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new PTE[size];
    memset(table, 0, sizeof(PTE[size]));
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

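// lookupTable is a multimap from virtual page number to an index into
// 'table'; several entries may share a VPN, so a hit additionally requires
// the tag to match and either the ASM (address space match) bit to be set
// or the ASN to match.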
// look up an entry in the TLB
PTE *
TLB::lookup(Addr vpn, uint8_t asn) const
{
    // assume not found...
    PTE *retval = NULL;

    PageTable::const_iterator i = lookupTable.find(vpn);
    if (i != lookupTable.end()) {
        while (i->first == vpn) {
            int index = i->second;
            PTE *pte = &table[index];
            assert(pte->valid);
            if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
                retval = pte;
                break;
            }

            ++i;
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


Fault
TLB::checkCacheability(RequestPtr &req)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) support having the bit in 39, but
     * Tsunami (which Linux assumes uses an EV6) generates accesses with
     * the bit in 40. So we must check for both, but we have debug flags
     * to catch a weird case where both are used, which shouldn't happen.
     */

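    // (Worked example for the non-Turbolaser build below: a physical address
    // with bit 43 set, e.g. 0x80000003000, would be marked uncacheable --
    // unless it falls in the unimplemented IPR region -- while 0x3000 would
    // pass through untouched.)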

#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39)
#else
    if (req->getPaddr() & PAddrUncachedBit43)
#endif
    {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(req->getFlags() | UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
    }
    return NoFault;
}


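// Replacement policy: nlu points at the next slot to overwrite and is
// advanced by nextnlu() after every insertion, so entries are recycled in
// order rather than by true LRU.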
// insert a new TLB entry
void
TLB::insert(Addr addr, PTE &pte)
{
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(PTE[size]));
    lookupTable.clear();
    nlu = 0;
}

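// flushProcesses invalidates every entry whose ASM (address space match)
// bit is clear, i.e. the per-process translations, while leaving globally
// shared ASM mappings in place.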
void
TLB::flushProcesses()
{
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        PTE *pte = &table[index];
        assert(pte->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        PTE *pte = &table[index];
        assert(pte->valid);

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry
            pte->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
ITB::ITB(const std::string &name, int size)
    : TLB(name, size)
{}


void
ITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}
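// (accesses is a formula statistic here: it is never incremented directly,
// but derived from hits + misses when the stats are dumped.)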


Fault
ITB::translate(RequestPtr &req, ThreadContext *tc) const
{
    // If this is a PAL PC, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(req->getFlags() | PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            PTE *pte = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!pte) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((pte->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(pte->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

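// Note on the superpage checks above and below: VAddrSpaceEV6(va) == 0x7e
// selects VA<47:41> == 0x7e, i.e. the kseg window at 0xfffffc0000000000
// that kernel-mode code uses for direct-mapped access to physical memory.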
///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
DTB::DTB(const std::string &name, int size)
    : TLB(name, size)
{}

void
DTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write) const
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            PTE *pte = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!pte) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((pte->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // declare a write access violation fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (pte->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (pte->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (pte->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

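// index() exposes the entry at the current replacement pointer (nlu); with
// advance == true it also bumps the pointer, mirroring what insert() does.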
PTE &
TLB::index(bool advance)
{
    PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

/* end namespace AlphaISA */ }

// -- revision 4465: old SimObject builder boilerplate --

DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", TLB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(ITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(ITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(ITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(ITB)


CREATE_SIM_OBJECT(ITB)
{
    return new ITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", ITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(DTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(DTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(DTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(DTB)


CREATE_SIM_OBJECT(DTB)
{
    return new DTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", DTB)

// -- revision 4762: params-based create() functions replace the builder macros --

AlphaISA::ITB *
AlphaITBParams::create()
{
    return new AlphaISA::ITB(name, size);
}

AlphaISA::DTB *
AlphaDTBParams::create()
{
    return new AlphaISA::DTB(name, size);
}
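// A minimal sketch (an assumption, not the actual generated header) of what
// params/AlphaITB.hh would have to provide for the create() above to compile:
//
//     struct AlphaITBParams : public SimObjectParams
//     {
//         int size;                // TLB entry count
//         AlphaISA::ITB *create();
//     };
//
// 'name' and 'size' used inside create() are members of the params object;
// the old INIT_PARAM_DFLT defaults (48 ITB entries, 64 DTB entries) would now
// be supplied by whatever builds these params structs rather than by this file.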