/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 31 unchanged lines hidden ---

#include "sim/builder.hh"

/* @todo remove some of the magic constants. -- ali */
namespace SparcISA
{

TLB::TLB(const std::string &name, int s)
    : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
      cacheValid(false)
{
    // To make this work you'll have to change the hypervisor and OS
    if (size > 64)
        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");

    tlb = new TlbEntry[size];
    memset(tlb, 0, sizeof(TlbEntry) * size);

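    // Every entry starts out on the free list; insert() allocates from here
    // before falling back to the replacement sweep.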
    for (int x = 0; x < size; x++)
        freeList.push_back(&tlb[x]);
}

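// Clear the used bit on every mapped, unlocked entry (and drop it from the
// used count); locked entries stay marked as used.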
void
TLB::clearUsedBits()
{
    MapIter i;
    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
        TlbEntry *t = i->second;
        if (!t->pte.locked()) {
            t->used = false;
            usedEntries--;
        }
    }
}

void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
            const PageTableEntry& PTE, int entry)
{
    MapIter i;
    TlbEntry *new_entry = NULL;
    TlbRange tr;
    int x;

    cacheValid = false;
    tr.va = va;
    tr.size = PTE.size() - 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
            va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Demap any entry that conflicts
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "TLB: Found conflicting entry %#X, deleting it\n",
                i->second);
        lookupTable.erase(i);
    }

    if (entry != -1) {
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        if (!freeList.empty()) {
            new_entry = freeList.front();
        } else {
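            // No free entries: sweep round-robin starting just past the last
            // replaced slot, looking for an unlocked entry to evict.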
            x = lastReplaced;
            do {
                ++x;
                if (x == size)
                    x = 0;
                if (x == lastReplaced)
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
            lastReplaced = x;
            new_entry = &tlb[x];
            lookupTable.erase(new_entry->range);
        }
        /*
        for (x = 0; x < size; x++) {
            if (!tlb[x].valid || !tlb[x].used) {
                new_entry = &tlb[x];
                break;
            }
        }*/
    }

insertAllLocked:
    // Update the last entry if they're all locked
    if (!new_entry) {
        new_entry = &tlb[size-1];
        lookupTable.erase(new_entry->range);
    }

    freeList.remove(new_entry);
    DPRINTF(TLB, "Using entry: %#X\n", new_entry);

    assert(PTE.valid());
    new_entry->range.va = va;
    new_entry->range.size = PTE.size() - 1;
    new_entry->range.partitionId = partition_id;
    new_entry->range.contextId = context_id;
    new_entry->range.real = real;
    new_entry->pte = PTE;
    new_entry->used = true;
    new_entry->valid = true;
    usedEntries++;

    i = lookupTable.insert(new_entry->range, new_entry);
    assert(i != lookupTable.end());

    // If all entries have their used bit set, clear it on them all, except
    // the one we just inserted
    if (usedEntries == size) {
        clearUsedBits();

--- 80 unchanged lines hidden ---

    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        DPRINTF(IPR, "TLB: Demapped page\n");
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "Freeing TLB entry : %#X\n", i->second);
        lookupTable.erase(i);
    }
}

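// Invalidate every entry belonging to the given partition and context,
// returning valid ones to the free list.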
void
TLB::demapContext(int partition_id, int context_id)
{
    int x;
    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
            partition_id, context_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (tlb[x].range.contextId == context_id &&
            tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

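// Invalidate every unlocked entry in the given partition, regardless of
// context; locked entries are left alone.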
void
TLB::demapAll(int partition_id)
{
    int x;
    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "Freeing TLB entry : %#X\n", &tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

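// Invalidate every entry, locked or not, and put the previously valid ones
// back on the free list.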
void
TLB::invalidateAll()
{
    int x;
    cacheValid = false;

    freeList.clear();
    for (x = 0; x < size; x++) {
        if (tlb[x].valid == true)
            freeList.push_back(&tlb[x]);
        tlb[x].valid = false;
    }
    usedEntries = 0;
}

uint64_t
TLB::TteRead(int entry) {
    if (entry >= size)
        panic("entry: %d\n", entry);

    assert(entry < size);
    if (tlb[entry].valid)
        return tlb[entry].pte();
    else
        return (uint64_t)-1ll;
}

uint64_t
TLB::TagRead(int entry) {
    assert(entry < size);
    uint64_t tag;
    if (!tlb[entry].valid)
        return (uint64_t)-1ll;

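    // Rebuild the TTE tag from the stored range: context id and VA in the
    // low bits, the real bit at bit 60, the partition id at bit 61, and the
    // complemented page-size field shifted up to bit 56.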
    tag = tlb[entry].range.contextId;
    tag |= tlb[entry].range.va;
    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
    return tag;
}

bool
TLB::validVirtualAddress(Addr va, bool am)
{

--- 199 unchanged lines hidden ---

    if (hpriv && implicit) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // Be fast if we can!
    if (cacheValid && cacheState == tlbdata) {
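        // Two-entry translation cache: reuse a cached entry if its ASI
        // matches and the requested address falls within its range.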
        if (cacheEntry[0] && cacheAsi[0] == asi && cacheEntry[0]->range.va < vaddr + size &&
            cacheEntry[0]->range.va + cacheEntry[0]->range.size > vaddr) {
            req->setPaddr(cacheEntry[0]->pte.paddr() & ~(cacheEntry[0]->pte.size()-1) |
                          vaddr & cacheEntry[0]->pte.size()-1);
            return NoFault;
        }
        if (cacheEntry[1] && cacheAsi[1] == asi && cacheEntry[1]->range.va < vaddr + size &&
            cacheEntry[1]->range.va + cacheEntry[1]->range.size > vaddr) {
            req->setPaddr(cacheEntry[1]->pte.paddr() & ~(cacheEntry[1]->pte.size()-1) |
                          vaddr & cacheEntry[1]->pte.size()-1);
            return NoFault;
        }
    }

    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);

--- 143 unchanged lines hidden ---


    if (!priv && e->pte.priv()) {
        writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
        return new DataAccessException;
    }

    // cache translation data for the next translation
    cacheState = tlbdata;
    if (!cacheValid) {
        cacheEntry[1] = NULL;
        cacheEntry[0] = NULL;
    }

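    // If this translation isn't already cached, shift slot 0 down to slot 1
    // and record the new entry (and the ASI used to reach it) in slot 0.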
    if (cacheEntry[0] != e && cacheEntry[1] != e) {
        cacheEntry[1] = cacheEntry[0];
        cacheEntry[0] = e;
        cacheAsi[1] = cacheAsi[0];
        cacheAsi[0] = asi;
        if (implicit)
            cacheAsi[0] = (ASI)0;
    }
    cacheValid = true;
    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                  vaddr & e->pte.size()-1);
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
    /** Normal flow ends here. */

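    // Scratchpad register access: offsets above 0x38 are not implemented,
    // and offsets 0x20-0x2f are accessible only in hypervisor mode.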
handleScratchRegAccess:
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

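    // Queue register access: requires privileged or hypervisor mode; the
    // offset must fall within 0x3c0-0x3f8, and non-hypervisor accesses must
    // also be 16-byte aligned.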
handleQueueRegAccess:
    if (!priv && !hpriv) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new PrivilegedAction;
    }
    if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleSparcErrorRegAccess:
    if (!hpriv) {
        if (priv) {

--- 455 unchanged lines hidden ---