tlb.cc (5532:d8ab33f5ff9a) tlb.cc (5543:3af77710f397)
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/alpha/faults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
#include "cpu/thread_context.hh"

using namespace std;
using namespace EV5;

namespace AlphaISA {
///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))
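// MODE2MASK turns a processor mode number (on Alpha: kernel = 0,
// executive = 1, supervisor = 2, user = 3) into a one-hot mask that is
// tested against a TLB entry's per-mode read/write-enable bit vectors,
// e.g. entry->xre & MODE2MASK(mode_kernel) in the translate code below.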

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry[size]));
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

// look up an entry in the TLB
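// The three-entry EntryCache below acts as a small most-recently-used
// filter in front of the main table: hits are served from it directly,
// and misses fall back to lookupTable, a multimap from virtual page
// number to table index (several entries may share a VPN if their ASNs
// differ). Any operation that can move or invalidate entries calls
// flushCache() so the cached pointers never go stale.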
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
// in Alpha, cacheability is controlled by upper-level bits of the
// physical address

/*
 * We support having the uncacheable bit in either bit 39 or bit 40.
 * The Turbolaser platform (and EV5) support having the bit in 39, but
 * Tsunami (which Linux assumes uses an EV6) generates accesses with
 * the bit in 40. So we must check for both, but we have debug flags
 * to catch a weird case where both are used, which shouldn't happen.
 */


#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39)
#else
    if (req->getPaddr() & PAddrUncachedBit43)
#endif
    {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(req->getFlags() | UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
        // We shouldn't be able to read from an uncacheable address in Alpha as
        // we don't have a ROM and we don't want to try to fetch from a device
        // register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}


// insert a new TLB entry
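// Replacement is round-robin: the nlu index picks the victim slot and
// nextnlu() advances it after every insert. If the victim was valid, its
// lookupTable mapping is erased first so the multimap never points at a
// recycled slot.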
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry[size]));
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

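// On unserialize, only entries restored as valid are re-inserted into
// lookupTable. The small EntryCache is not checkpointed at all: it starts
// out empty (flushCache() in the constructor) and refills on later lookups.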
void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
ITB::ITB(const Params *p)
    : TLB(p)
{}


void
ITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}

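// Instruction-stream translation. Roughly, in order: PAL-mode PCs are
// treated as physical (minus the low PAL marker bits), explicit PHYSICAL
// requests pass straight through, the kernel-only superpage region maps
// VA to PA directly, and everything else goes through lookup() with the
// current ASN followed by an execute-permission (xre) check against the
// current processor mode in IPR_ICM.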
Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    // If this is a PAL PC, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(req->getFlags() | PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                                     asn);

            if (!entry) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);

}

///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
DTB::DTB(const Params *p)
    : TLB(p)
{}

void
DTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

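// Data-stream translation. Same overall flow as the ITB, with three
// additions: misaligned accesses fault immediately, PAL code can override
// the effective mode via IPR_ALT_MODE, and on a TLB hit the entry's
// xwe/xre permission bits plus the fault-on-write/read (fonw/fonr) bits
// decide between success and the various DTB faults. The MM_STAT_* masks
// passed to each fault record what went wrong for the fault handler.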
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));


    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {

            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write access violation: page not writable in this mode
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

/* end namespace AlphaISA */ }

AlphaISA::ITB *
AlphaITBParams::create()
{
    return new AlphaISA::ITB(this);
}

AlphaISA::DTB *
AlphaDTBParams::create()
{
    return new AlphaISA::DTB(this);
}