/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/alpha/faults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
#include "cpu/thread_context.hh"
#include "params/AlphaDTB.hh"
#include "params/AlphaITB.hh"

using namespace std;
using namespace EV5;

namespace AlphaISA {
///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//
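// Debug-only flags intended to catch the case, described in
// checkCacheability() below, where uncacheable accesses show up with the
// uncacheable bit in both positions (bit 39 and bit 40).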
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

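// Convert a mode_type value into a one-hot mask for testing the per-mode
// read/write enable bits (xre/xwe) of a TLB entry.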
#define MODE2MASK(X) (1 << (X))

TLB::TLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

// look up an entry in the TLB
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

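    // Check the small cache of recently used translations first; a hit
    // requires a matching VPN and either a matching ASN or the ASM bit.
    // Entries are installed here by updateCache() on the slower path below.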
    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

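    // On a cache miss, fall back to lookupTable, a multimap keyed by VPN
    // that can hold several entries (one per ASN) for the same tag.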
    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


Fault
TLB::checkCacheability(RequestPtr &req)
{
// in Alpha, cacheability is controlled by upper-level bits of the
// physical address

/*
 * We support having the uncacheable bit in either bit 39 or bit 40.
 * The Turbolaser platform (and EV5) supports having the bit in 39, but
 * Tsunami (which Linux assumes uses an EV6) generates accesses with
 * the bit in 40. So we must check for both, but we have debug flags
 * to catch a weird case where both are used, which shouldn't happen.
 */


#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39)
#else
    if (req->getPaddr() & PAddrUncachedBit43)
#endif
    {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(req->getFlags() | UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
    }
    return NoFault;
}


// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
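    // table[nlu] is the next entry to be replaced; if it is currently
    // valid, its old mapping must be removed from lookupTable before the
    // entry is overwritten.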
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, size * sizeof(TlbEntry));
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

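// Invalidate all process-specific entries, i.e. every entry that does not
// have the ASM (address space match) bit set; global mappings are kept.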
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, entry->tag,
                    entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

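// Invalidate any entry that maps the given virtual address for this ASN
// (or for any ASN, if the entry has the ASM bit set).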
void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


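// Checkpoint the TLB: each entry gets its own section so that unserialize()
// can restore the table and rebuild lookupTable from the valid entries.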
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
// Alpha ITB
//
ITB::ITB(const std::string &name, int size)
    : TLB(name, size)
{}


void
ITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}


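// Translate an instruction fetch. PAL-mode PCs and explicitly physical
// requests bypass the TLB; superpage addresses map directly to physical
// addresses (kernel mode only); everything else goes through lookup().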
Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    // If this is a pal pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(req->getFlags() | PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) != mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

///////////////////////////////////////////////////////////////////////
//
// Alpha DTB
//
DTB::DTB(const std::string &name, int size)
    : TLB(name, size)
{}

void
DTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

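// Translate a data access. Alignment is checked first; then, as in the
// ITB, physical and superpage requests bypass the TLB and everything else
// is looked up by VPN/ASN and checked against the entry's protection
// (xre/xwe) and fault-on-read/write bits.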
Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);
                return new DtbAcvFault(req->getVaddr(), req->getFlags(), flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // declare the write access violation fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

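// Return a reference to the entry that will be replaced next (table[nlu]),
// optionally advancing the replacement pointer via nextnlu().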
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

/* end namespace AlphaISA */ }

AlphaISA::ITB *
AlphaITBParams::create()
{
    return new AlphaISA::ITB(name, size);
}

AlphaISA::DTB *
AlphaDTBParams::create()
{
    return new AlphaISA::DTB(name, size);
}