tlb.cc (10463:25c5da51bbe0)
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m)
{
    stage2Mmu = m;
    tableWalker->setMMU(m);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}
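
// Illustrative only (a sketch, not part of the upstream source): callers
// that just want to peek at the TLB pass functional == true, so the MRU
// promotion above is skipped and replacement state is left untouched:
//
//     TlbEntry *te = tlb->lookup(va, asid, vmid, false /* hyp */,
//                                true /* secure */, true /* functional */,
//                                false /* ignore_asn */, EL1);
//     if (te) {
//         Addr pa = te->pAddr(va);  // hypothetical use of the hit entry
//     }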

// Insert a new TLB entry.
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global,
                table[size-1].isHyp, table[size-1].el);

    // Insert at the MRU position and evict the LRU entry
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}
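
// Replacement note (sketch, not in the upstream source): table[0] is the
// MRU slot, so the shift loop above gives LRU-by-position replacement.
// Inserting E into a four-entry TLB behaves like:
//
//     before: [A, B, C, D]    // D, the LRU entry, is evicted
//     after:  [E, A, B, C]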

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}
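
// Note (not in the upstream source): lookup() is re-run after each
// invalidation because more than one entry can match the same MVA (for
// example a global and a non-global mapping, or overlapping page sizes);
// the loop ends once no valid entry matches.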

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}
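
// Example behaviour (sketch, not in the upstream source): EL0 and EL1
// share a translation regime, so a flush targeting either matches both,
// while EL2/EL3 flushes match only their own level:
//
//     checkELMatch(1, 0, false) -> true
//     checkELMatch(1, 2, false) -> false
//     checkELMatch(2, 2, false) -> true
//     checkELMatch(2, 1, true)  -> true   // ignore_el matches anything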

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        bootUncacheability = otlb->bootUncacheability;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(ostream &os)
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(bootUncacheability);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(bootUncacheability);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
    }
}
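
// Checkpoint note (not in the upstream source): each entry lives in its
// own section named "<tlb>.TlbEntryN". Restoring into a smaller TLB than
// the one checkpointed silently drops the tail entries (the min() above);
// they are simply refilled by table walks after restore.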

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}
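
// Probe note (not in the upstream source): "Refills" fires once per entry
// installed by insert() above (ppRefills->notify(1)), so PMU-style
// listeners can attach to this probe point by name to count TLB refills.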

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return Fault(new GenericPageTableFault(vaddr_tainted));
    req->setPaddr(paddr);

    return NoFault;
}
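
// SE-mode note (an assumption about the flag encoding, not stated in the
// upstream source): only the alignment check is modelled here; protection
// is left to the OS-emulation page table. The low request-flag bits
// (selected by AlignmentMask) encode log2 of the access size, so
// 'vaddr & mask(flags & AlignmentMask)' is non-zero exactly when the
// address is misaligned for that size.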

Fault
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    return NoFault;
}

Fault
TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz,
                       bool is_exec, bool is_write,
                       TlbEntry::DomainType domain,
                       LookupLevel lookup_level)
{
    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ?
        ArmFault::LpaeTran : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
                                     isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return new PrefetchAbort(vaddr,
                                         ArmFault::DomainLL + te->lookupLevel,
                                         isStage2, tranMethod);
            else
                return new DataAbort(vaddr, te->domain, is_write,
                                     ArmFault::DomainLL + te->lookupLevel,
                                     isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check; we still do
    // the HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
              (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return new PrefetchAbort(vaddr,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                "priv:%d write:%d\n", ap, is_priv, is_write);
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
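
// Short-descriptor AP[2:0] decode above, summarised (sketch; the ARM ARM
// is authoritative, and this table is not part of the upstream source):
//
//     ap   privileged      unprivileged
//     0    aborts, unless legacy (sctlr.xp == 0) rs settings grant
//          read-only access
//     1    read/write      abort
//     2    read/write      read-only
//     3    read/write      read/write
//     4    UNPREDICTABLE   UNPREDICTABLE
//     5    read-only       abort
//     6,7  read-only       read-only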

Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr_tainted, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchUncacheable,
                                     isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned
            // to a cache line and should not be the address reported in FAR
            return new PrefetchAbort(req->getPC(),
                                     ArmFault::PermissionLL + te->lookupLevel,
                                     isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return new DataAbort(vaddr_tainted, te->domain, is_write,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}
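
// AArch64 decode note (not in the upstream source): at EL0/EL1 the
// switches above key on perm = AP[2:1]:XN:PXN. For example, perm == 4 at
// EL0 is AP == 0b01 (read/write at EL0 and EL1) with XN == 0, so reads
// and writes are granted, and fetches too unless SCTLR.WXN forbids
// executing from writeable regions.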

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ?
        ArmFault::LpaeTran : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d "
            "S1S2NsTran:%d\n", isPriv, flags & UserMode, isSecure,
            tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    // Generate an alignment fault for unaligned PC
    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
        return new PCAlignmentFault(req->getPC());
    }

    // If this is a clrex instruction, provide a PA of 0 with no fault.
    // This will force the monitor to set the tracked address to 0, which
    // is a bit of a hack but effectively clears this processor's monitor.
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setPaddr(0);
        req->setFlags(Request::UNCACHEABLE);
        req->setFlags(Request::CLEAR_LL);
        return NoFault;
    }
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: "
                "shareable: %d, innerAttrs: %d, outerAttrs: %d, "
                "isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault))
        delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);
        if (te->nonCacheable) {
            req->setFlags(Request::UNCACHEABLE);
        }

        if (!bootUncacheability &&
            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
            req->setFlags(Request::UNCACHEABLE);
        }

        req->setPaddr(te->pAddr(vaddr));
        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return new DataAbort(vaddr_tainted,
                                 TlbEntry::DomainType::NoAccess, is_write,
                                 ArmFault::AlignmentFault, isStage2,
                                 tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);
        }
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return new IllegalInstSetStateFault();
        }
    }

    return fault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType,
                            true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a
    // stage 2 translation, tell the translation that we've either finished
    // or that it's going to take a while. By not doing this when we're in
    // the middle of a stage 2 translation we prevent marking the
    // translation as delayed twice: once when the translation starts and
    // again when the stage 1 translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}
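
// Timing protocol (sketch, not in the upstream source): a caller
// implementing the Translation interface is told either that the result
// is ready (finish) or that it will arrive later (markDelayed, with
// finish called again once the table walk completes). A hypothetical
// consumer:
//
//     struct MyXlate : public BaseTLB::Translation
//     {
//         void markDelayed() { /* a walk is in flight; stall */ }
//         void finish(Fault fault, RequestPtr req, ThreadContext *tc,
//                     BaseTLB::Mode mode)
//         { /* resume the access, or take 'fault' */ }
//     };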

BaseMasterPort*
TLB::getMasterPort()
{
    return &tableWalker->getMasterPort("port");
}

DmaPort&
TLB::getWalkerPort()
{
    return tableWalker->getWalkerPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc);
    isSecure &= (tranType & HypMode) == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid = 0;
        isHyp = false;
        directToStage2 = false;
        stage2Req = false;
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                               !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation
            // and go directly to stage 2. This value is cached so we don't
            // have to compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    curTranType = tranType;
}
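
// Caching note (not in the upstream source): the register snapshot above
// is only recomputed when miscRegValid is false or the translation type
// changes; drainResume() clears the flag after checkpoint restores and
// CPU switches so that stale copies of SCTLR, TTBCR, etc. are never used.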

Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                 target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB
            // or go any further with the memory access (here we can safely
            // use the fault status for the short desc. format in all cases)
            prefetchFaults++;
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for "
                "%#x(%d:%d)\n", vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional,
                                  is_secure, tranType);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc,
                 Mode mode, Translation *translation, bool timing,
                 bool functional, TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb,
                *s1Te, req, translation, mode, timing, functional,
                curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now.
                // We get round this by asking the object to self delete
                // when the translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, "
                        "fault %p\n", vaddr_tainted,
                        req->hasPaddr() ? req->getPaddr() : ~0, fault);
                if (fault != NoFault) {
                    ArmFault *armFault =
                        reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}
541Fault
542TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
543 Translation *translation, bool &delay, bool timing)
544{
545 updateMiscReg(tc);
546 Addr vaddr_tainted = req->getVaddr();
547 Addr vaddr = 0;
548 if (aarch64)
549 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
550 else
551 vaddr = vaddr_tainted;
552 uint32_t flags = req->getFlags();
553
554 bool is_fetch = (mode == Execute);
555 bool is_write = (mode == Write);
556
557 if (!is_fetch) {
558 assert(flags & MustBeOne);
559 if (sctlr.a || !(flags & AllowUnaligned)) {
560 if (vaddr & mask(flags & AlignmentMask)) {
561 // LPAE is always disabled in SE mode
562 return new DataAbort(vaddr_tainted,
563 TlbEntry::DomainType::NoAccess, is_write,
564 ArmFault::AlignmentFault, isStage2,
565 ArmFault::VmsaTran);
566 }
567 }
568 }
569
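    // In syscall-emulation mode there is no hardware table walk:
    // translate through the software page table of the owning process
    // and fault on a miss.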
570 Addr paddr;
571 Process *p = tc->getProcessPtr();
572
573 if (!p->pTable->translate(vaddr, paddr))
574 return Fault(new GenericPageTableFault(vaddr_tainted));
575 req->setPaddr(paddr);
576
577 return NoFault;
578}
579
580Fault
581TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
582{
583 return NoFault;
584}
585
586Fault
587TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
588 bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
589{
590 return NoFault;
591}
592
593Fault
594TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
595{
 596    Addr vaddr = req->getVaddr(); // AArch32: no tagged addresses to purify
597 uint32_t flags = req->getFlags();
598 bool is_fetch = (mode == Execute);
599 bool is_write = (mode == Write);
600 bool is_priv = isPriv && !(flags & UserMode);
601
 602    // Get the translation method (LPAE vs. VMSA) from the actual table entry
603 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
604 : ArmFault::VmsaTran;
605
606 // If this is the second stage of translation and the request is for a
607 // stage 1 page table walk then we need to check the HCR.PTW bit. This
608 // allows us to generate a fault if the request targets an area marked
609 // as a device or strongly ordered.
610 if (isStage2 && req->isPTWalk() && hcr.ptw &&
611 (te->mtype != TlbEntry::MemoryType::Normal)) {
612 return new DataAbort(vaddr, te->domain, is_write,
613 ArmFault::PermissionLL + te->lookupLevel,
614 isStage2, tranMethod);
615 }
616
617 // Generate an alignment fault for unaligned data accesses to device or
618 // strongly ordered memory
619 if (!is_fetch) {
620 if (te->mtype != TlbEntry::MemoryType::Normal) {
621 if (vaddr & mask(flags & AlignmentMask)) {
622 alignFaults++;
623 return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess, is_write,
624 ArmFault::AlignmentFault, isStage2,
625 tranMethod);
626 }
627 }
628 }
629
630 if (te->nonCacheable) {
631 // Prevent prefetching from I/O devices.
632 if (req->isPrefetch()) {
633 // Here we can safely use the fault status for the short
634 // desc. format in all cases
635 return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
636 isStage2, tranMethod);
637 }
638 }
639
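    // For reference, each domain has a 2-bit field in the DACR
    // (short-descriptor format only). The ARMv7 VMSA encodings are:
    //   0b00  No access -> domain fault on any access
    //   0b01  Client    -> accesses are checked against the permissions
    //   0b10  Reserved  -> UNPREDICTABLE
    //   0b11  Manager   -> accesses are not checked; no permission fault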
640 if (!te->longDescFormat) {
641 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
642 case 0:
643 domainFaults++;
644 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
645 " domain: %#x write:%d\n", dacr,
646 static_cast<uint8_t>(te->domain), is_write);
647 if (is_fetch)
648 return new PrefetchAbort(vaddr,
649 ArmFault::DomainLL + te->lookupLevel,
650 isStage2, tranMethod);
651 else
652 return new DataAbort(vaddr, te->domain, is_write,
653 ArmFault::DomainLL + te->lookupLevel,
654 isStage2, tranMethod);
655 case 1:
656 // Continue with permissions check
657 break;
658 case 2:
659 panic("UNPRED domain\n");
660 case 3:
661 return NoFault;
662 }
663 }
664
 665    // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
666 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
667 uint8_t hap = te->hap;
668
669 if (sctlr.afe == 1 || te->longDescFormat)
670 ap |= 1;
671
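    // For reference, the effective AP[2:0] encodings the switch below
    // expects (a sketch of the VMSA short-descriptor semantics):
    //   0b000  no access            0b100  reserved (UNPREDICTABLE)
    //   0b001  PL1 RW               0b101  PL1 RO
    //   0b010  PL1 RW, PL0 RO       0b110  RO at any level (deprecated)
    //   0b011  RW at any level      0b111  RO at any level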
672 bool abt;
673 bool isWritable = true;
 674    // If this is a stage 2 access (e.g. for reading stage 1 page table
 675    // entries) then don't perform the AP permissions check; we still do
 676    // the HAP check below.
677 if (isStage2) {
678 abt = false;
679 } else {
680 switch (ap) {
681 case 0:
682 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
683 (int)sctlr.rs);
684 if (!sctlr.xp) {
685 switch ((int)sctlr.rs) {
686 case 2:
687 abt = is_write;
688 break;
689 case 1:
690 abt = is_write || !is_priv;
691 break;
692 case 0:
693 case 3:
694 default:
695 abt = true;
696 break;
697 }
698 } else {
699 abt = true;
700 }
701 break;
702 case 1:
703 abt = !is_priv;
704 break;
705 case 2:
706 abt = !is_priv && is_write;
707 isWritable = is_priv;
708 break;
709 case 3:
710 abt = false;
711 break;
712 case 4:
 713            panic("UNPRED permissions\n");
714 case 5:
715 abt = !is_priv || is_write;
716 isWritable = false;
717 break;
718 case 6:
719 case 7:
720 abt = is_write;
721 isWritable = false;
722 break;
723 default:
724 panic("Unknown permissions %#x\n", ap);
725 }
726 }
727
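    // Stage 2 HAP[2:1] access permissions: bit 0 of 'hap' grants read,
    // bit 1 grants write. For execute-never below: SCTLR.WXN makes
    // writable regions XN, and SCTLR.UWXN makes regions writable at PL0
    // (ap == 3) XN for privileged execution.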
728 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
729 bool xn = te->xn || (isWritable && sctlr.wxn) ||
730 (ap == 3 && sctlr.uwxn && is_priv);
731 if (is_fetch && (abt || xn ||
732 (te->longDescFormat && te->pxn && !is_priv) ||
733 (isSecure && te->ns && scr.sif))) {
734 permsFaults++;
735 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
 736                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
 737                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
738 return new PrefetchAbort(vaddr,
739 ArmFault::PermissionLL + te->lookupLevel,
740 isStage2, tranMethod);
 741    } else if (abt || hapAbt) {
742 permsFaults++;
743 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
744 " write:%d\n", ap, is_priv, is_write);
745 return new DataAbort(vaddr, te->domain, is_write,
746 ArmFault::PermissionLL + te->lookupLevel,
 747                             isStage2 || !abt, tranMethod);
748 }
749 return NoFault;
750}
751
752
753Fault
754TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
755 ThreadContext *tc)
756{
757 assert(aarch64);
758
759 Addr vaddr_tainted = req->getVaddr();
760 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
761
762 uint32_t flags = req->getFlags();
763 bool is_fetch = (mode == Execute);
764 bool is_write = (mode == Write);
765 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
766
767 updateMiscReg(tc, curTranType);
768
769 // If this is the second stage of translation and the request is for a
770 // stage 1 page table walk then we need to check the HCR.PTW bit. This
771 // allows us to generate a fault if the request targets an area marked
772 // as a device or strongly ordered.
773 if (isStage2 && req->isPTWalk() && hcr.ptw &&
774 (te->mtype != TlbEntry::MemoryType::Normal)) {
775 return new DataAbort(vaddr_tainted, te->domain, is_write,
776 ArmFault::PermissionLL + te->lookupLevel,
777 isStage2, ArmFault::LpaeTran);
778 }
779
780 // Generate an alignment fault for unaligned accesses to device or
781 // strongly ordered memory
782 if (!is_fetch) {
783 if (te->mtype != TlbEntry::MemoryType::Normal) {
784 if (vaddr & mask(flags & AlignmentMask)) {
785 alignFaults++;
786 return new DataAbort(vaddr_tainted,
787 TlbEntry::DomainType::NoAccess, is_write,
788 ArmFault::AlignmentFault, isStage2,
789 ArmFault::LpaeTran);
790 }
791 }
792 }
793
794 if (te->nonCacheable) {
795 // Prevent prefetching from I/O devices.
796 if (req->isPrefetch()) {
797 // Here we can safely use the fault status for the short
798 // desc. format in all cases
799 return new PrefetchAbort(vaddr_tainted,
800 ArmFault::PrefetchUncacheable,
801 isStage2, ArmFault::LpaeTran);
802 }
803 }
804
805 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
806 bool grant = false;
807
808 uint8_t xn = te->xn;
809 uint8_t pxn = te->pxn;
810 bool r = !is_write && !is_fetch;
811 bool w = is_write;
812 bool x = is_fetch;
813 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
814 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
815
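    // The switches below pack the long-descriptor permission bits into a
    // small key. For EL0/EL1: perm = {AP[2:1], XN, PXN}, where AP[1]
    // enables unprivileged (EL0) access and AP[2] makes the region
    // read-only; XN (UXN) and PXN gate instruction fetch. EL2/EL3 use
    // only AP[2] and XN. (A summary of the ARMv8 stage 1 scheme.)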
816 if (isStage2) {
817 panic("Virtualization in AArch64 state is not supported yet");
818 } else {
819 switch (aarch64EL) {
820 case EL0:
821 {
822 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
823 switch (perm) {
824 case 0:
825 case 1:
826 case 8:
827 case 9:
828 grant = x;
829 break;
830 case 4:
831 case 5:
832 grant = r || w || (x && !sctlr.wxn);
833 break;
834 case 6:
835 case 7:
836 grant = r || w;
837 break;
838 case 12:
839 case 13:
840 grant = r || x;
841 break;
842 case 14:
843 case 15:
844 grant = r;
845 break;
846 default:
847 grant = false;
848 }
849 }
850 break;
851 case EL1:
852 {
853 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
854 switch (perm) {
855 case 0:
856 case 2:
857 grant = r || w || (x && !sctlr.wxn);
858 break;
859 case 1:
860 case 3:
861 case 4:
862 case 5:
863 case 6:
864 case 7:
865 // regions that are writeable at EL0 should not be
866 // executable at EL1
867 grant = r || w;
868 break;
869 case 8:
870 case 10:
871 case 12:
872 case 14:
873 grant = r || x;
874 break;
875 case 9:
876 case 11:
877 case 13:
878 case 15:
879 grant = r;
880 break;
881 default:
882 grant = false;
883 }
884 }
885 break;
886 case EL2:
887 case EL3:
888 {
889 uint8_t perm = (ap & 0x2) | xn;
890 switch (perm) {
891 case 0:
 892                grant = r || w || (x && !sctlr.wxn);
893 break;
894 case 1:
895 grant = r || w;
896 break;
897 case 2:
898 grant = r || x;
899 break;
900 case 3:
901 grant = r;
902 break;
903 default:
904 grant = false;
905 }
906 }
907 break;
908 }
909 }
910
911 if (!grant) {
912 if (is_fetch) {
913 permsFaults++;
914 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
915 "AP:%d priv:%d write:%d ns:%d sif:%d "
916 "sctlr.afe: %d\n",
917 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
 918            // Use the PC instead of vaddr: the fetch vaddr may be aligned
 919            // to a cache line and is not the address to report in FAR
920 return new PrefetchAbort(req->getPC(),
921 ArmFault::PermissionLL + te->lookupLevel,
922 isStage2, ArmFault::LpaeTran);
923 } else {
924 permsFaults++;
925 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
926 "priv:%d write:%d\n", ap, is_priv, is_write);
927 return new DataAbort(vaddr_tainted, te->domain, is_write,
928 ArmFault::PermissionLL + te->lookupLevel,
929 isStage2, ArmFault::LpaeTran);
930 }
931 }
932
933 return NoFault;
934}
935
936Fault
937TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
938 Translation *translation, bool &delay, bool timing,
939 TLB::ArmTranslationType tranType, bool functional)
940{
941 // No such thing as a functional timing access
942 assert(!(timing && functional));
943
944 updateMiscReg(tc, tranType);
945
946 Addr vaddr_tainted = req->getVaddr();
947 Addr vaddr = 0;
948 if (aarch64)
949 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
950 else
951 vaddr = vaddr_tainted;
952 uint32_t flags = req->getFlags();
953
954 bool is_fetch = (mode == Execute);
955 bool is_write = (mode == Write);
956 bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
957 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
958 : ArmFault::VmsaTran;
959
960 req->setAsid(asid);
961
962 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
963 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
964
965 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
966 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
967 scr, sctlr, flags, tranType);
968
969 // Generate an alignment fault for unaligned PC
970 if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
971 return new PCAlignmentFault(req->getPC());
972 }
973
 974    // If this is a clrex instruction, provide a PA of 0 with no fault.
 975    // This will force the monitor to set the tracked address to 0,
 976    // a bit of a hack, but it effectively clears this processor's monitor.
 977    if (flags & Request::CLEAR_LL) {
978 // @todo: check implications of security extensions
979 req->setPaddr(0);
980 req->setFlags(Request::UNCACHEABLE);
981 req->setFlags(Request::CLEAR_LL);
982 return NoFault;
983 }
984 if ((req->isInstFetch() && (!sctlr.i)) ||
 985        ((!req->isInstFetch()) && (!sctlr.c))) {
986 req->setFlags(Request::UNCACHEABLE);
987 }
988 if (!is_fetch) {
989 assert(flags & MustBeOne);
990 if (sctlr.a || !(flags & AllowUnaligned)) {
991 if (vaddr & mask(flags & AlignmentMask)) {
992 alignFaults++;
993 return new DataAbort(vaddr_tainted,
994 TlbEntry::DomainType::NoAccess, is_write,
995 ArmFault::AlignmentFault, isStage2,
996 tranMethod);
997 }
998 }
999 }
1000
 1001    // MMU disabled (stage 1: !sctlr.m; stage 2 TLB: !hcr.vm): flat mapping
1002 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1003
1004 req->setPaddr(vaddr);
1005 // When the MMU is off the security attribute corresponds to the
1006 // security state of the processor
1007 if (isSecure)
1008 req->setFlags(Request::SECURE);
1009
1010 // @todo: double check this (ARM ARM issue C B3.2.1)
1011 if (long_desc_format || sctlr.tre == 0) {
1012 req->setFlags(Request::UNCACHEABLE);
1013 } else {
1014 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
1015 req->setFlags(Request::UNCACHEABLE);
1016 }
1017
1018 // Set memory attributes
1019 TlbEntry temp_te;
1020 temp_te.ns = !isSecure;
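        // With translation off, data accesses default to Strongly-ordered
        // and fetches to Normal memory; HCR.DC == 1 instead forces Normal
        // write-back attributes for non-secure stage 1 accesses (the else
        // branch below).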
1021 if (isStage2 || hcr.dc == 0 || isSecure ||
1022 (isHyp && !(tranType & S1CTran))) {
1023
1024 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1025 : TlbEntry::MemoryType::StronglyOrdered;
1026 temp_te.innerAttrs = 0x0;
1027 temp_te.outerAttrs = 0x0;
1028 temp_te.shareable = true;
1029 temp_te.outerShareable = true;
1030 } else {
1031 temp_te.mtype = TlbEntry::MemoryType::Normal;
1032 temp_te.innerAttrs = 0x3;
1033 temp_te.outerAttrs = 0x3;
1034 temp_te.shareable = false;
1035 temp_te.outerShareable = false;
1036 }
1037 temp_te.setAttributes(long_desc_format);
1038 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1039 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1040 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1041 isStage2);
1042 setAttr(temp_te.attributes);
1043
1044 return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
1045 }
1046
1047 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1048 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1049 // Translation enabled
1050
1051 TlbEntry *te = NULL;
1052 TlbEntry mergeTe;
1053 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1054 functional, &mergeTe);
1055 // only proceed if we have a valid table entry
1056 if ((te == NULL) && (fault == NoFault)) delay = true;
1057
1058 // If we have the table entry transfer some of the attributes to the
1059 // request that triggered the translation
1060 if (te != NULL) {
1061 // Set memory attributes
1062 DPRINTF(TLBVerbose,
1063 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1064 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1065 te->shareable, te->innerAttrs, te->outerAttrs,
1066 static_cast<uint8_t>(te->mtype), isStage2);
1067 setAttr(te->attributes);
1068 if (te->nonCacheable) {
1069 req->setFlags(Request::UNCACHEABLE);
1070 }
1071
1072 if (!bootUncacheability &&
1073 ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
1074 req->setFlags(Request::UNCACHEABLE);
1075 }
1076
1077 req->setPaddr(te->pAddr(vaddr));
1078 if (isSecure && !te->ns) {
1079 req->setFlags(Request::SECURE);
1080 }
1081 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1082 (te->mtype != TlbEntry::MemoryType::Normal)) {
1083 // Unaligned accesses to Device memory should always cause an
1084 // abort regardless of sctlr.a
1085 alignFaults++;
1086 return new DataAbort(vaddr_tainted,
1087 TlbEntry::DomainType::NoAccess, is_write,
1088 ArmFault::AlignmentFault, isStage2,
1089 tranMethod);
1090 }
1091
1092 // Check for a trickbox generated address fault
1093 if (fault == NoFault) {
1094 fault = trickBoxCheck(req, mode, te->domain);
1095 }
1096 }
1097
1098 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1099 if (fault == NoFault) {
1100 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1101 if (aarch64 && is_fetch && cpsr.il == 1) {
1102 return new IllegalInstSetStateFault();
1103 }
1104 }
1105
1106 return fault;
1107}
1108
1109Fault
1110TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1111 TLB::ArmTranslationType tranType)
1112{
1113 updateMiscReg(tc, tranType);
1114
1115 if (directToStage2) {
1116 assert(stage2Tlb);
1117 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1118 }
1119
1120 bool delay = false;
1121 Fault fault;
1122 if (FullSystem)
1123 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1124 else
1125 fault = translateSe(req, tc, mode, NULL, delay, false);
1126 assert(!delay);
1127 return fault;
1128}
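
// For reference, a minimal sketch of how a CPU model might drive this
// atomic entry point. The request setup is illustrative only (names such
// as doAccess are hypothetical); real callers build the Request through
// their own helpers:
//
//     RequestPtr req = new Request(asid, vaddr, size, flags, masterId,
//                                  pc, contextId, tid);
//     Fault f = tlb->translateAtomic(req, tc, BaseTLB::Read);
//     if (f == NoFault)
//         doAccess(req->getPaddr());  // physical address is now valid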
1129
1130Fault
1131TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1132 TLB::ArmTranslationType tranType)
1133{
1134 updateMiscReg(tc, tranType);
1135
1136 if (directToStage2) {
1137 assert(stage2Tlb);
1138 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1139 }
1140
1141 bool delay = false;
1142 Fault fault;
1143 if (FullSystem)
1144 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1145 else
1146 fault = translateSe(req, tc, mode, NULL, delay, false);
1147 assert(!delay);
1148 return fault;
1149}
1150
1151Fault
1152TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1153 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1154{
1155 updateMiscReg(tc, tranType);
1156
1157 if (directToStage2) {
1158 assert(stage2Tlb);
1159 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1160 }
1161
1162 assert(translation);
1163
1164 return translateComplete(req, tc, translation, mode, tranType, isStage2);
1165}
1166
1167Fault
1168TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1169 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1170 bool callFromS2)
1171{
1172 bool delay = false;
1173 Fault fault;
1174 if (FullSystem)
1175 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1176 else
1177 fault = translateSe(req, tc, mode, translation, delay, true);
 1178    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n",
 1179            delay, fault != NoFault);
 1180    // If we have a translation, and we're not in the middle of doing a
 1181    // stage 2 translation, tell the translation that we've either finished
 1182    // or it's going to take a while. By not doing this when we're in the
 1183    // middle of a stage 2 translation we avoid marking the translation as
 1184    // delayed twice, once when the translation starts and again when the
 1185    // stage 1 translation completes.
1186 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1187 if (!delay)
1188 translation->finish(fault, req, tc, mode);
1189 else
1190 translation->markDelayed();
1191 }
1192 return fault;
1193}
1194
1195BaseMasterPort*
1196TLB::getMasterPort()
1197{
1198 return &tableWalker->getMasterPort("port");
1199}
1200
1201DmaPort&
1202TLB::getWalkerPort()
1203{
1204 return tableWalker->getWalkerPort();
1205}
1206
1207void
1208TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1209{
 1210    // Check whether the regs have changed or the translation mode differs.
 1211    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
 1212    // one type of translation anyway.
1213 if (miscRegValid && ((tranType == curTranType) || isStage2)) {
1214 return;
1215 }
1216
1217 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1218 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1219 // Dependencies: SCR/SCR_EL3, CPSR
1220 isSecure = inSecureState(tc);
1221 isSecure &= (tranType & HypMode) == 0;
1222 isSecure &= (tranType & S1S2NsTran) == 0;
1223 aarch64 = !cpsr.width;
1224 if (aarch64) { // AArch64
1225 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1226 switch (aarch64EL) {
1227 case EL0:
1228 case EL1:
1229 {
1230 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1231 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1232 uint64_t ttbr_asid = ttbcr.a1 ?
1233 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1234 tc->readMiscReg(MISCREG_TTBR0_EL1);
1235 asid = bits(ttbr_asid,
1236 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
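                // TCR_EL1.AS selects between 16-bit ASIDs (TTBRx[63:48])
                // and 8-bit ASIDs (TTBRx[55:48]).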
1237 }
1238 break;
1239 case EL2:
1240 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1241 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1242 asid = -1;
1243 break;
1244 case EL3:
1245 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1246 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1247 asid = -1;
1248 break;
1249 }
1250 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1251 isPriv = aarch64EL != EL0;
1252 // @todo: modify this behaviour to support Virtualization in
1253 // AArch64
1254 vmid = 0;
1255 isHyp = false;
1256 directToStage2 = false;
1257 stage2Req = false;
1258 } else { // AArch32
1259 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1260 !isSecure));
1261 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1262 !isSecure));
1263 scr = tc->readMiscReg(MISCREG_SCR);
1264 isPriv = cpsr.mode != MODE_USER;
1265 if (haveLPAE && ttbcr.eae) {
1266 // Long-descriptor translation table format in use
1267 uint64_t ttbr_asid = tc->readMiscReg(
1268 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1269 : MISCREG_TTBR0,
1270 tc, !isSecure));
1271 asid = bits(ttbr_asid, 55, 48);
1272 } else {
1273 // Short-descriptor translation table format in use
1274 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
 1275            MISCREG_CONTEXTIDR, tc, !isSecure));
1276 asid = context_id.asid;
1277 }
1278 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1279 !isSecure));
1280 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1281 !isSecure));
1282 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1283 !isSecure));
1284 hcr = tc->readMiscReg(MISCREG_HCR);
1285
1286 if (haveVirtualization) {
1287 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
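            // The current VMID lives in VTTBR[55:48].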
1288 isHyp = cpsr.mode == MODE_HYP;
1289 isHyp |= tranType & HypMode;
1290 isHyp &= (tranType & S1S2NsTran) == 0;
1291 isHyp &= (tranType & S1CTran) == 0;
1292 if (isHyp) {
1293 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1294 }
1295 // Work out if we should skip the first stage of translation and go
1296 // directly to stage 2. This value is cached so we don't have to
1297 // compute it for every translation.
1298 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1299 !(tranType & S1CTran);
1300 directToStage2 = stage2Req && !sctlr.m;
1301 } else {
1302 vmid = 0;
1303 stage2Req = false;
1304 isHyp = false;
1305 directToStage2 = false;
1306 }
1307 }
1308 miscRegValid = true;
1309 curTranType = tranType;
1310}
1311
1312Fault
1313TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1314 Translation *translation, bool timing, bool functional,
1315 bool is_secure, TLB::ArmTranslationType tranType)
1316{
1317 bool is_fetch = (mode == Execute);
1318 bool is_write = (mode == Write);
1319
1320 Addr vaddr_tainted = req->getVaddr();
1321 Addr vaddr = 0;
1322 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1323 if (aarch64) {
1324 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
1325 } else {
1326 vaddr = vaddr_tainted;
1327 }
1328 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1329 if (*te == NULL) {
1330 if (req->isPrefetch()) {
1331 // if the request is a prefetch don't attempt to fill the TLB or go
1332 // any further with the memory access (here we can safely use the
1333 // fault status for the short desc. format in all cases)
1334 prefetchFaults++;
1335 return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1336 }
1337
1338 if (is_fetch)
1339 instMisses++;
1340 else if (is_write)
1341 writeMisses++;
1342 else
1343 readMisses++;
1344
1345 // start translation table walk, pass variables rather than
 1346        // re-retrieving them in the table walker, for speed
1347 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1348 vaddr_tainted, asid, vmid);
1349 Fault fault;
1350 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1351 translation, timing, functional, is_secure,
1352 tranType);
 1353        // for timing mode, return and wait for the table walk to complete
1354 if (timing || fault != NoFault) {
1355 return fault;
1356 }
1357
1358 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1359 if (!*te)
1360 printTlb();
1361 assert(*te);
1362 } else {
1363 if (is_fetch)
1364 instHits++;
1365 else if (is_write)
1366 writeHits++;
1367 else
1368 readHits++;
1369 }
1370 return NoFault;
1371}
1372
1373Fault
1374TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1375 Translation *translation, bool timing, bool functional,
1376 TlbEntry *mergeTe)
1377{
1378 Fault fault;
1379 TlbEntry *s1Te = NULL;
1380
1381 Addr vaddr_tainted = req->getVaddr();
1382
1383 // Get the stage 1 table entry
1384 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1385 isSecure, curTranType);
1386 // only proceed if we have a valid table entry
1387 if ((s1Te != NULL) && (fault == NoFault)) {
1388 // Check stage 1 permissions before checking stage 2
1389 if (aarch64)
1390 fault = checkPermissions64(s1Te, req, mode, tc);
1391 else
1392 fault = checkPermissions(s1Te, req, mode);
 1393        if (stage2Req && (fault == NoFault)) {
1394 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1395 req, translation, mode, timing, functional, curTranType);
1396 fault = s2Lookup->getTe(tc, mergeTe);
1397 if (s2Lookup->isComplete()) {
1398 *te = mergeTe;
1399 // We've finished with the lookup so delete it
1400 delete s2Lookup;
1401 } else {
 1402                // The lookup hasn't completed, so we can't delete it now. We
 1403                // get around this by asking the object to delete itself when
 1404                // the translation is complete.
1405 s2Lookup->setSelfDelete();
1406 }
1407 } else {
1408 // This case deals with an S1 hit (or bypass), followed by
1409 // an S2 hit-but-perms issue
1410 if (isStage2) {
1411 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1412 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1413 if (fault != NoFault) {
 1414                    ArmFault *armFault = static_cast<ArmFault *>(fault.get());
1415 armFault->annotate(ArmFault::S1PTW, false);
1416 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1417 }
1418 }
1419 *te = s1Te;
1420 }
1421 }
1422 return fault;
1423}
1424
1425ArmISA::TLB *
1426ArmTLBParams::create()
1427{
1428 return new ArmISA::TLB(this);
1429}