// tlb.cc (8232:b28d06a175be)
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

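// MODE2MASK converts an Alpha privilege mode (kernel, executive,
// supervisor, user) into the corresponding bit position used in a TLB
// entry's read/write protection masks (xre/xwe below).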
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB acv");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

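    // fetch_accesses is presumably declared as a formula stat in tlb.hh,
    // so this assignment defines a derived expression that is evaluated
    // whenever statistics are dumped (likewise for the data_* stats below).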
    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
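// Lookups first check a small most-recently-used cache (EntryCache, three
// entries) before falling back to the lookupTable multimap, which maps a
// VPN to indices into the backing table array. An entry matches if its
// tag equals the VPN and it is either an address-space-match entry (asma)
// or belongs to the requesting address space number (asn).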
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40. The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40. So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */
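    // Note the test below is against bit 43: for EV6-style (bit 40)
    // uncached addresses, the translate functions OR in bits 43:40, so
    // such accesses arrive here with bit 43 set as well.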

    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in
        // Alpha as we don't have a ROM and we don't want to try to fetch
        // from a device register as we destroy any data that is
        // clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}

// insert a new TLB entry
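// Replacement is round-robin: nlu is the index of the next slot to
// overwrite, and nextnlu() advances it. Before a valid victim is
// overwritten, its mapping is removed from lookupTable so the multimap
// stays consistent with the backing table array.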
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // If this is a PAL PC, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
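        // For example, the canonical kernel superpage address
        // 0xfffffc0000001000 has VA<47:41> == 0x7e and, after the
        // masking below, comes out as physical address 0x1000.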
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
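            // (setting bits 43:40 when bit 40 is set keeps EV6-style
            // uncached addresses detectable via the bit-43 test in
            // checkCacheability)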
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                                     asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);
}

Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
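    // (assuming request sizes are powers of two, vaddr & (size - 1) is
    // nonzero exactly when the address is not size-aligned)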
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(tc->pcState().pc())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
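                // a miss on a virtual PTE fetch (VPTE flag) appears to be
                // the "double miss" case and takes the PDtbMiss vector;
                // an ordinary miss takes NDtbMiss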
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write access violation
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

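// index() exposes the entry at the round-robin replacement pointer;
// passing advance = true also bumps the pointer to the next slot.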
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

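// Translation here consumes no simulated time, so the timing interface
// simply performs the atomic translation and signals completion
// immediately.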
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

} // namespace AlphaISA

AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}