tlb.cc: diff between revisions 12735:e3da526a0654 and 12749:223c83ed9979
1/*
2 * Copyright (c) 2010-2013, 2016-2018 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45#include "arch/arm/tlb.hh"
46
47#include <memory>
48#include <string>
49#include <vector>
50
51#include "arch/arm/faults.hh"
52#include "arch/arm/pagetable.hh"
53#include "arch/arm/stage2_lookup.hh"
54#include "arch/arm/stage2_mmu.hh"
55#include "arch/arm/system.hh"
56#include "arch/arm/table_walker.hh"
57#include "arch/arm/utility.hh"
58#include "arch/generic/mmapped_ipr.hh"
59#include "base/inifile.hh"
60#include "base/str.hh"
61#include "base/trace.hh"
62#include "cpu/base.hh"
63#include "cpu/thread_context.hh"
64#include "debug/Checkpoint.hh"
65#include "debug/TLB.hh"
66#include "debug/TLBVerbose.hh"
67#include "mem/page_table.hh"
68#include "mem/request.hh"
69#include "params/ArmTLB.hh"
70#include "sim/full_system.hh"
71#include "sim/process.hh"
72
73using namespace std;
74using namespace ArmISA;
75
76TLB::TLB(const ArmTLBParams *p)
77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
78 isStage2(p->is_stage2), stage2Req(false), _attr(0),
79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
80 stage2Mmu(NULL), test(nullptr), rangeMRU(1),
81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
82 isHyp(false), asid(0), vmid(0), dacr(0),
83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
84{
85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
86
87 tableWalker->setTlb(this);
88
89 // Cache system-level properties
90 haveLPAE = tableWalker->haveLPAE();
91 haveVirtualization = tableWalker->haveVirtualization();
92 haveLargeAsid64 = tableWalker->haveLargeAsid64();
93
94 if (sys)
95 m5opRange = sys->m5opRange();
96}
97
98TLB::~TLB()
99{
100 delete[] table;
101}
102
103void
104TLB::init()
105{
106 if (stage2Mmu && !isStage2)
107 stage2Tlb = stage2Mmu->stage2Tlb();
108}
109
110void
111TLB::setMMU(Stage2MMU *m, MasterID master_id)
112{
113 stage2Mmu = m;
114 tableWalker->setMMU(m, master_id);
115}
116
117bool
118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
119{
120 updateMiscReg(tc);
121
122 if (directToStage2) {
123 assert(stage2Tlb);
124 return stage2Tlb->translateFunctional(tc, va, pa);
125 }
126
127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
128 aarch64 ? aarch64EL : EL1);
129 if (!e)
130 return false;
131 pa = e->pAddr(va);
132 return true;
133}
134
135Fault
136TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
136TLB::finalizePhysical(const RequestPtr &req,
137 ThreadContext *tc, Mode mode) const
137{
138 const Addr paddr = req->getPaddr();
139
140 if (m5opRange.contains(paddr)) {
141 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
142 req->setPaddr(GenericISA::iprAddressPseudoInst(
143 (paddr >> 8) & 0xFF,
144 paddr & 0xFF));
145 }
146
147 return NoFault;
148}
149
150TlbEntry*
151TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
152 bool functional, bool ignore_asn, uint8_t target_el)
153{
154
155 TlbEntry *retval = NULL;
156
157 // Search the TLB; on a hit the entry is promoted towards the MRU position
158 int x = 0;
159 while (retval == NULL && x < size) {
160 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
161 target_el)) ||
162 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
163 // We only move the hit entry ahead when the position is higher
164 // than rangeMRU
165 if (x > rangeMRU && !functional) {
166 TlbEntry tmp_entry = table[x];
167 for (int i = x; i > 0; i--)
168 table[i] = table[i - 1];
169 table[0] = tmp_entry;
170 retval = &table[0];
171 } else {
172 retval = &table[x];
173 }
174 break;
175 }
176 ++x;
177 }
178
179 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
180 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
181 "el: %d\n",
182 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
183 retval ? retval->pfn : 0, retval ? retval->size : 0,
184 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
185 retval ? retval->ns : 0, retval ? retval->nstid : 0,
186 retval ? retval->global : 0, retval ? retval->asid : 0,
187 retval ? retval->el : 0);
188
189 return retval;
190}
191
192// insert a new TLB entry
193void
194TLB::insert(Addr addr, TlbEntry &entry)
195{
196 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
197 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
198 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
199 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
200 entry.global, entry.valid, entry.nonCacheable, entry.xn,
201 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
202 entry.isHyp);
203
204 if (table[size - 1].valid)
205 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
206 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
207 table[size-1].vpn << table[size-1].N, table[size-1].asid,
208 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
209 table[size-1].size, table[size-1].ap, table[size-1].ns,
210 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
211 table[size-1].el);
212
213 // Insert at the MRU position, evicting the LRU entry
214
215 for (int i = size - 1; i > 0; --i)
216 table[i] = table[i-1];
217 table[0] = entry;
218
219 inserts++;
220 ppRefills->notify(1);
221}
222
223void
224TLB::printTlb() const
225{
226 int x = 0;
227 TlbEntry *te;
228 DPRINTF(TLB, "Current TLB contents:\n");
229 while (x < size) {
230 te = &table[x];
231 if (te->valid)
232 DPRINTF(TLB, " * %s\n", te->print());
233 ++x;
234 }
235}
236
237void
238TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
239{
240 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
241 (secure_lookup ? "secure" : "non-secure"));
242 int x = 0;
243 TlbEntry *te;
244 while (x < size) {
245 te = &table[x];
246 if (te->valid && secure_lookup == !te->nstid &&
247 (te->vmid == vmid || secure_lookup) &&
248 checkELMatch(target_el, te->el, ignore_el)) {
249
250 DPRINTF(TLB, " - %s\n", te->print());
251 te->valid = false;
252 flushedEntries++;
253 }
254 ++x;
255 }
256
257 flushTlb++;
258
259 // If there's a second stage TLB (and we're not it) then flush it as well
260 // if we're currently in hyp mode
261 if (!isStage2 && isHyp) {
262 stage2Tlb->flushAllSecurity(secure_lookup, true);
263 }
264}
265
266void
267TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
268{
269 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
270 (hyp ? "hyp" : "non-hyp"));
271 int x = 0;
272 TlbEntry *te;
273 while (x < size) {
274 te = &table[x];
275 if (te->valid && te->nstid && te->isHyp == hyp &&
276 checkELMatch(target_el, te->el, ignore_el)) {
277
278 DPRINTF(TLB, " - %s\n", te->print());
279 flushedEntries++;
280 te->valid = false;
281 }
282 ++x;
283 }
284
285 flushTlb++;
286
287 // If there's a second stage TLB (and we're not it) then flush it as well
288 if (!isStage2 && !hyp) {
289 stage2Tlb->flushAllNs(false, true);
290 }
291}
292
293void
294TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
295{
296 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
297 "(%s lookup)\n", mva, asn, (secure_lookup ?
298 "secure" : "non-secure"));
299 _flushMva(mva, asn, secure_lookup, false, false, target_el);
300 flushTlbMvaAsid++;
301}
302
303void
304TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
305{
306 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
307 (secure_lookup ? "secure" : "non-secure"));
308
309 int x = 0;
310 TlbEntry *te;
311
312 while (x < size) {
313 te = &table[x];
314 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
315 (te->vmid == vmid || secure_lookup) &&
316 checkELMatch(target_el, te->el, false)) {
317
318 te->valid = false;
319 DPRINTF(TLB, " - %s\n", te->print());
320 flushedEntries++;
321 }
322 ++x;
323 }
324 flushTlbAsid++;
325}
326
327void
328TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
329{
330 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
331 (secure_lookup ? "secure" : "non-secure"));
332 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
333 flushTlbMva++;
334}
335
336void
337TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
338 bool ignore_asn, uint8_t target_el)
339{
340 TlbEntry *te;
341 // D5.7.2: Sign-extend address to 64 bits
342 mva = sext<56>(mva);
343 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
344 target_el);
345 while (te != NULL) {
346 if (secure_lookup == !te->nstid) {
347 DPRINTF(TLB, " - %s\n", te->print());
348 te->valid = false;
349 flushedEntries++;
350 }
351 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
352 target_el);
353 }
354}
355
356void
357TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
358{
359 assert(!isStage2);
360 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
361}
362
363bool
364TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
365{
366 bool elMatch = true;
367 if (!ignore_el) {
368 if (target_el == 2 || target_el == 3) {
369 elMatch = (tentry_el == target_el);
370 } else {
371 elMatch = (tentry_el == 0) || (tentry_el == 1);
372 }
373 }
374 return elMatch;
375}
376
377void
378TLB::drainResume()
379{
380 // We might have unserialized something or switched CPUs, so make
381 // sure to re-read the misc regs.
382 miscRegValid = false;
383}
384
385void
386TLB::takeOverFrom(BaseTLB *_otlb)
387{
388 TLB *otlb = dynamic_cast<TLB*>(_otlb);
389 /* Make sure we actually have a valid type */
390 if (otlb) {
391 _attr = otlb->_attr;
392 haveLPAE = otlb->haveLPAE;
393 directToStage2 = otlb->directToStage2;
394 stage2Req = otlb->stage2Req;
395
396 /* Sync the stage 2 TLB if it exists in both
397 * the old CPU and the new one
398 */
399 if (!isStage2 &&
400 stage2Tlb && otlb->stage2Tlb) {
401 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
402 }
403 } else {
404 panic("Incompatible TLB type!");
405 }
406}
407
408void
409TLB::serialize(CheckpointOut &cp) const
410{
411 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
412
413 SERIALIZE_SCALAR(_attr);
414 SERIALIZE_SCALAR(haveLPAE);
415 SERIALIZE_SCALAR(directToStage2);
416 SERIALIZE_SCALAR(stage2Req);
417
418 int num_entries = size;
419 SERIALIZE_SCALAR(num_entries);
420 for (int i = 0; i < size; i++)
421 table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
422}
423
424void
425TLB::unserialize(CheckpointIn &cp)
426{
427 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
428
429 UNSERIALIZE_SCALAR(_attr);
430 UNSERIALIZE_SCALAR(haveLPAE);
431 UNSERIALIZE_SCALAR(directToStage2);
432 UNSERIALIZE_SCALAR(stage2Req);
433
434 int num_entries;
435 UNSERIALIZE_SCALAR(num_entries);
436 for (int i = 0; i < min(size, num_entries); i++)
437 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
438}
439
440void
441TLB::regStats()
442{
443 BaseTLB::regStats();
444 instHits
445 .name(name() + ".inst_hits")
446 .desc("ITB inst hits")
447 ;
448
449 instMisses
450 .name(name() + ".inst_misses")
451 .desc("ITB inst misses")
452 ;
453
454 instAccesses
455 .name(name() + ".inst_accesses")
456 .desc("ITB inst accesses")
457 ;
458
459 readHits
460 .name(name() + ".read_hits")
461 .desc("DTB read hits")
462 ;
463
464 readMisses
465 .name(name() + ".read_misses")
466 .desc("DTB read misses")
467 ;
468
469 readAccesses
470 .name(name() + ".read_accesses")
471 .desc("DTB read accesses")
472 ;
473
474 writeHits
475 .name(name() + ".write_hits")
476 .desc("DTB write hits")
477 ;
478
479 writeMisses
480 .name(name() + ".write_misses")
481 .desc("DTB write misses")
482 ;
483
484 writeAccesses
485 .name(name() + ".write_accesses")
486 .desc("DTB write accesses")
487 ;
488
489 hits
490 .name(name() + ".hits")
491 .desc("DTB hits")
492 ;
493
494 misses
495 .name(name() + ".misses")
496 .desc("DTB misses")
497 ;
498
499 accesses
500 .name(name() + ".accesses")
501 .desc("DTB accesses")
502 ;
503
504 flushTlb
505 .name(name() + ".flush_tlb")
506 .desc("Number of times complete TLB was flushed")
507 ;
508
509 flushTlbMva
510 .name(name() + ".flush_tlb_mva")
511 .desc("Number of times TLB was flushed by MVA")
512 ;
513
514 flushTlbMvaAsid
515 .name(name() + ".flush_tlb_mva_asid")
516 .desc("Number of times TLB was flushed by MVA & ASID")
517 ;
518
519 flushTlbAsid
520 .name(name() + ".flush_tlb_asid")
521 .desc("Number of times TLB was flushed by ASID")
522 ;
523
524 flushedEntries
525 .name(name() + ".flush_entries")
526 .desc("Number of entries that have been flushed from TLB")
527 ;
528
529 alignFaults
530 .name(name() + ".align_faults")
531 .desc("Number of TLB faults due to alignment restrictions")
532 ;
533
534 prefetchFaults
535 .name(name() + ".prefetch_faults")
536 .desc("Number of TLB faults due to prefetch")
537 ;
538
539 domainFaults
540 .name(name() + ".domain_faults")
541 .desc("Number of TLB faults due to domain restrictions")
542 ;
543
544 permsFaults
545 .name(name() + ".perms_faults")
546 .desc("Number of TLB faults due to permissions restrictions")
547 ;
548
549 instAccesses = instHits + instMisses;
550 readAccesses = readHits + readMisses;
551 writeAccesses = writeHits + writeMisses;
552 hits = readHits + writeHits + instHits;
553 misses = readMisses + writeMisses + instMisses;
554 accesses = readAccesses + writeAccesses + instAccesses;
555}
556
557void
558TLB::regProbePoints()
559{
560 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
561}
562
563Fault
564TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
565TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
565 Translation *translation, bool &delay, bool timing)
566{
567 updateMiscReg(tc);
568 Addr vaddr_tainted = req->getVaddr();
569 Addr vaddr = 0;
570 if (aarch64)
571 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
572 else
573 vaddr = vaddr_tainted;
574 Request::Flags flags = req->getFlags();
575
576 bool is_fetch = (mode == Execute);
577 bool is_write = (mode == Write);
578
579 if (!is_fetch) {
580 assert(flags & MustBeOne);
581 if (sctlr.a || !(flags & AllowUnaligned)) {
582 if (vaddr & mask(flags & AlignmentMask)) {
583 // LPAE is always disabled in SE mode
584 return std::make_shared<DataAbort>(
585 vaddr_tainted,
586 TlbEntry::DomainType::NoAccess, is_write,
587 ArmFault::AlignmentFault, isStage2,
588 ArmFault::VmsaTran);
589 }
590 }
591 }
592
593 Addr paddr;
594 Process *p = tc->getProcessPtr();
595
596 if (!p->pTable->translate(vaddr, paddr))
597 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
598 req->setPaddr(paddr);
599
600 return finalizePhysical(req, tc, mode);
601}
602
603Fault
604TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
605TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
605{
606 // A data cache maintenance instruction that operates by MVA does
607 // not generate a Data Abort exception due to a Permission fault
608 if (req->isCacheMaintenance()) {
609 return NoFault;
610 }
611
612 Addr vaddr = req->getVaddr(); // 32-bit mode does not need to purify the address
613 Request::Flags flags = req->getFlags();
614 bool is_fetch = (mode == Execute);
615 bool is_write = (mode == Write);
616 bool is_priv = isPriv && !(flags & UserMode);
617
618 // Get the translation type from the actual table entry
619 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
620 : ArmFault::VmsaTran;
621
622 // If this is the second stage of translation and the request is for a
623 // stage 1 page table walk then we need to check the HCR.PTW bit. This
624 // allows us to generate a fault if the request targets an area marked
625 // as a device or strongly ordered.
626 if (isStage2 && req->isPTWalk() && hcr.ptw &&
627 (te->mtype != TlbEntry::MemoryType::Normal)) {
628 return std::make_shared<DataAbort>(
629 vaddr, te->domain, is_write,
630 ArmFault::PermissionLL + te->lookupLevel,
631 isStage2, tranMethod);
632 }
633
634 // Generate an alignment fault for unaligned data accesses to device or
635 // strongly ordered memory
636 if (!is_fetch) {
637 if (te->mtype != TlbEntry::MemoryType::Normal) {
638 if (vaddr & mask(flags & AlignmentMask)) {
639 alignFaults++;
640 return std::make_shared<DataAbort>(
641 vaddr, TlbEntry::DomainType::NoAccess, is_write,
642 ArmFault::AlignmentFault, isStage2,
643 tranMethod);
644 }
645 }
646 }
647
648 if (te->nonCacheable) {
649 // Prevent prefetching from I/O devices.
650 if (req->isPrefetch()) {
651 // Here we can safely use the fault status for the short
652 // desc. format in all cases
653 return std::make_shared<PrefetchAbort>(
654 vaddr, ArmFault::PrefetchUncacheable,
655 isStage2, tranMethod);
656 }
657 }
658
659 if (!te->longDescFormat) {
660 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
661 case 0:
662 domainFaults++;
663 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
664 " domain: %#x write:%d\n", dacr,
665 static_cast<uint8_t>(te->domain), is_write);
666 if (is_fetch) {
667 // Use PC value instead of vaddr because vaddr might
668 // be aligned to cache line and should not be the
669 // address reported in FAR
670 return std::make_shared<PrefetchAbort>(
671 req->getPC(),
672 ArmFault::DomainLL + te->lookupLevel,
673 isStage2, tranMethod);
674 } else
675 return std::make_shared<DataAbort>(
676 vaddr, te->domain, is_write,
677 ArmFault::DomainLL + te->lookupLevel,
678 isStage2, tranMethod);
679 case 1:
680 // Continue with permissions check
681 break;
682 case 2:
683 panic("UNPRED domain\n");
684 case 3:
685 return NoFault;
686 }
687 }
688
689 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
690 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
691 uint8_t hap = te->hap;
692
693 if (sctlr.afe == 1 || te->longDescFormat)
694 ap |= 1;
695
696 bool abt;
697 bool isWritable = true;
698 // If this is a stage 2 access (e.g. for reading stage 1 page table entries)
699 // then don't perform the AP permissions check; we still do the HAP check
700 // below.
701 if (isStage2) {
702 abt = false;
703 } else {
704 switch (ap) {
705 case 0:
706 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
707 (int)sctlr.rs);
708 if (!sctlr.xp) {
709 switch ((int)sctlr.rs) {
710 case 2:
711 abt = is_write;
712 break;
713 case 1:
714 abt = is_write || !is_priv;
715 break;
716 case 0:
717 case 3:
718 default:
719 abt = true;
720 break;
721 }
722 } else {
723 abt = true;
724 }
725 break;
726 case 1:
727 abt = !is_priv;
728 break;
729 case 2:
730 abt = !is_priv && is_write;
731 isWritable = is_priv;
732 break;
733 case 3:
734 abt = false;
735 break;
736 case 4:
737 panic("UNPRED permissions\n");
738 case 5:
739 abt = !is_priv || is_write;
740 isWritable = false;
741 break;
742 case 6:
743 case 7:
744 abt = is_write;
745 isWritable = false;
746 break;
747 default:
748 panic("Unknown permissions %#x\n", ap);
749 }
750 }
751
752 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
753 bool xn = te->xn || (isWritable && sctlr.wxn) ||
754 (ap == 3 && sctlr.uwxn && is_priv);
755 if (is_fetch && (abt || xn ||
756 (te->longDescFormat && te->pxn && is_priv) ||
757 (isSecure && te->ns && scr.sif))) {
758 permsFaults++;
759 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
760 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
761 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
762 // Use PC value instead of vaddr because vaddr might be aligned to
763 // cache line and should not be the address reported in FAR
764 return std::make_shared<PrefetchAbort>(
765 req->getPC(),
766 ArmFault::PermissionLL + te->lookupLevel,
767 isStage2, tranMethod);
768 } else if (abt | hapAbt) {
769 permsFaults++;
770 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
771 " write:%d\n", ap, is_priv, is_write);
772 return std::make_shared<DataAbort>(
773 vaddr, te->domain, is_write,
774 ArmFault::PermissionLL + te->lookupLevel,
775 isStage2 | !abt, tranMethod);
776 }
777 return NoFault;
778}
779
780
781Fault
782TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
783TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
783 ThreadContext *tc)
784{
785 assert(aarch64);
786
787 // A data cache maintenance instruction that operates by VA does
788 // not generate a Permission fault unless:
789 // * It is a data cache invalidate (dc ivac) which requires write
790 // permissions to the VA, or
791 // * It is executed from EL0
792 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
793 return NoFault;
794 }
795
796 Addr vaddr_tainted = req->getVaddr();
797 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
798
799 Request::Flags flags = req->getFlags();
800 bool is_fetch = (mode == Execute);
801 // Cache clean operations require read permissions to the specified VA
802 bool is_write = !req->isCacheClean() && mode == Write;
803 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
804
805 updateMiscReg(tc, curTranType);
806
807 // If this is the second stage of translation and the request is for a
808 // stage 1 page table walk then we need to check the HCR.PTW bit. This
809 // allows us to generate a fault if the request targets an area marked
810 // as a device or strongly ordered.
811 if (isStage2 && req->isPTWalk() && hcr.ptw &&
812 (te->mtype != TlbEntry::MemoryType::Normal)) {
813 return std::make_shared<DataAbort>(
814 vaddr_tainted, te->domain, is_write,
815 ArmFault::PermissionLL + te->lookupLevel,
816 isStage2, ArmFault::LpaeTran);
817 }
818
819 // Generate an alignment fault for unaligned accesses to device or
820 // strongly ordered memory
821 if (!is_fetch) {
822 if (te->mtype != TlbEntry::MemoryType::Normal) {
823 if (vaddr & mask(flags & AlignmentMask)) {
824 alignFaults++;
825 return std::make_shared<DataAbort>(
826 vaddr_tainted,
827 TlbEntry::DomainType::NoAccess, is_write,
828 ArmFault::AlignmentFault, isStage2,
829 ArmFault::LpaeTran);
830 }
831 }
832 }
833
834 if (te->nonCacheable) {
835 // Prevent prefetching from I/O devices.
836 if (req->isPrefetch()) {
837 // Here we can safely use the fault status for the short
838 // desc. format in all cases
839 return std::make_shared<PrefetchAbort>(
840 vaddr_tainted,
841 ArmFault::PrefetchUncacheable,
842 isStage2, ArmFault::LpaeTran);
843 }
844 }
845
846 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
847 bool grant = false;
848
849 uint8_t xn = te->xn;
850 uint8_t pxn = te->pxn;
851 bool r = !is_write && !is_fetch;
852 bool w = is_write;
853 bool x = is_fetch;
854 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
855 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
856
857 if (isStage2) {
858 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
859 // In stage 2 we use the hypervisor access permission bits.
860 // The following permissions are described in ARM DDI 0487A.f
861 // D4-1802
862 uint8_t hap = 0x3 & te->hap;
863 if (is_fetch) {
864 // sctlr.wxn overrides the xn bit
865 grant = !sctlr.wxn && !xn;
866 } else if (is_write) {
867 grant = hap & 0x2;
868 } else { // is_read
869 grant = hap & 0x1;
870 }
871 } else {
872 switch (aarch64EL) {
873 case EL0:
874 {
875 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
876 switch (perm) {
877 case 0:
878 case 1:
879 case 8:
880 case 9:
881 grant = x;
882 break;
883 case 4:
884 case 5:
885 grant = r || w || (x && !sctlr.wxn);
886 break;
887 case 6:
888 case 7:
889 grant = r || w;
890 break;
891 case 12:
892 case 13:
893 grant = r || x;
894 break;
895 case 14:
896 case 15:
897 grant = r;
898 break;
899 default:
900 grant = false;
901 }
902 }
903 break;
904 case EL1:
905 {
906 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
907 switch (perm) {
908 case 0:
909 case 2:
910 grant = r || w || (x && !sctlr.wxn);
911 break;
912 case 1:
913 case 3:
914 case 4:
915 case 5:
916 case 6:
917 case 7:
918 // regions that are writeable at EL0 should not be
919 // executable at EL1
920 grant = r || w;
921 break;
922 case 8:
923 case 10:
924 case 12:
925 case 14:
926 grant = r || x;
927 break;
928 case 9:
929 case 11:
930 case 13:
931 case 15:
932 grant = r;
933 break;
934 default:
935 grant = false;
936 }
937 }
938 break;
939 case EL2:
940 case EL3:
941 {
942 uint8_t perm = (ap & 0x2) | xn;
943 switch (perm) {
944 case 0:
945 grant = r || w || (x && !sctlr.wxn);
946 break;
947 case 1:
948 grant = r || w;
949 break;
950 case 2:
951 grant = r || x;
952 break;
953 case 3:
954 grant = r;
955 break;
956 default:
957 grant = false;
958 }
959 }
960 break;
961 }
962 }
963
964 if (!grant) {
965 if (is_fetch) {
966 permsFaults++;
967 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
968 "AP:%d priv:%d write:%d ns:%d sif:%d "
969 "sctlr.afe: %d\n",
970 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
971 // Use PC value instead of vaddr because vaddr might be aligned to
972 // cache line and should not be the address reported in FAR
973 return std::make_shared<PrefetchAbort>(
974 req->getPC(),
975 ArmFault::PermissionLL + te->lookupLevel,
976 isStage2, ArmFault::LpaeTran);
977 } else {
978 permsFaults++;
979 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
980 "priv:%d write:%d\n", ap, is_priv, is_write);
981 return std::make_shared<DataAbort>(
982 vaddr_tainted, te->domain, is_write,
983 ArmFault::PermissionLL + te->lookupLevel,
984 isStage2, ArmFault::LpaeTran);
985 }
986 }
987
988 return NoFault;
989}
990
991Fault
992TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
993TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
993 Translation *translation, bool &delay, bool timing,
994 TLB::ArmTranslationType tranType, bool functional)
995{
996 // No such thing as a functional timing access
997 assert(!(timing && functional));
998
999 updateMiscReg(tc, tranType);
1000
1001 Addr vaddr_tainted = req->getVaddr();
1002 Addr vaddr = 0;
1003 if (aarch64)
1004 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
1005 else
1006 vaddr = vaddr_tainted;
1007 Request::Flags flags = req->getFlags();
1008
1009 bool is_fetch = (mode == Execute);
1010 bool is_write = (mode == Write);
1011 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
1012 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
1013 : ArmFault::VmsaTran;
1014
1015 req->setAsid(asid);
1016
1017 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1018 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1019
1020 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1021 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1022 scr, sctlr, flags, tranType);
1023
1024 if ((req->isInstFetch() && (!sctlr.i)) ||
1025 ((!req->isInstFetch()) && (!sctlr.c))){
1026 if (!req->isCacheMaintenance()) {
1027 req->setFlags(Request::UNCACHEABLE);
1028 }
1029 req->setFlags(Request::STRICT_ORDER);
1030 }
1031 if (!is_fetch) {
1032 assert(flags & MustBeOne);
1033 if (sctlr.a || !(flags & AllowUnaligned)) {
1034 if (vaddr & mask(flags & AlignmentMask)) {
1035 alignFaults++;
1036 return std::make_shared<DataAbort>(
1037 vaddr_tainted,
1038 TlbEntry::DomainType::NoAccess, is_write,
1039 ArmFault::AlignmentFault, isStage2,
1040 tranMethod);
1041 }
1042 }
1043 }
1044
1045 // If guest MMU is off or hcr.vm=0 go straight to stage2
1046 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1047
1048 req->setPaddr(vaddr);
1049 // When the MMU is off the security attribute corresponds to the
1050 // security state of the processor
1051 if (isSecure)
1052 req->setFlags(Request::SECURE);
1053
1054 // @todo: double check this (ARM ARM issue C B3.2.1)
1055 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1056 nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1057 if (!req->isCacheMaintenance()) {
1058 req->setFlags(Request::UNCACHEABLE);
1059 }
1060 req->setFlags(Request::STRICT_ORDER);
1061 }
1062
1063 // Set memory attributes
1064 TlbEntry temp_te;
1065 temp_te.ns = !isSecure;
1066 if (isStage2 || hcr.dc == 0 || isSecure ||
1067 (isHyp && !(tranType & S1CTran))) {
1068
1069 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1070 : TlbEntry::MemoryType::StronglyOrdered;
1071 temp_te.innerAttrs = 0x0;
1072 temp_te.outerAttrs = 0x0;
1073 temp_te.shareable = true;
1074 temp_te.outerShareable = true;
1075 } else {
1076 temp_te.mtype = TlbEntry::MemoryType::Normal;
1077 temp_te.innerAttrs = 0x3;
1078 temp_te.outerAttrs = 0x3;
1079 temp_te.shareable = false;
1080 temp_te.outerShareable = false;
1081 }
1082 temp_te.setAttributes(long_desc_format);
1083 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1084 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1085 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1086 isStage2);
1087 setAttr(temp_te.attributes);
1088
1089 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1090 }
1091
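    // MMU enabled: consult the TLB, and on a miss the table walker, for a
    // matching entry via getResultTe().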
1092 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1093 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1094 // Translation enabled
1095
1096 TlbEntry *te = NULL;
1097 TlbEntry mergeTe;
1098 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1099 functional, &mergeTe);
1100    // No entry and no fault: the table walk is still in progress, so delay
1101 if ((te == NULL) && (fault == NoFault)) delay = true;
1102
1103 // If we have the table entry transfer some of the attributes to the
1104 // request that triggered the translation
1105 if (te != NULL) {
1106 // Set memory attributes
1107 DPRINTF(TLBVerbose,
1108 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1109 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1110 te->shareable, te->innerAttrs, te->outerAttrs,
1111 static_cast<uint8_t>(te->mtype), isStage2);
1112 setAttr(te->attributes);
1113
1114 if (te->nonCacheable && !req->isCacheMaintenance())
1115 req->setFlags(Request::UNCACHEABLE);
1116
1117 // Require requests to be ordered if the request goes to
1118 // strongly ordered or device memory (i.e., anything other
1119 // than normal memory requires strict order).
1120 if (te->mtype != TlbEntry::MemoryType::Normal)
1121 req->setFlags(Request::STRICT_ORDER);
1122
1123 Addr pa = te->pAddr(vaddr);
1124 req->setPaddr(pa);
1125
1126 if (isSecure && !te->ns) {
1127 req->setFlags(Request::SECURE);
1128 }
1129 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1130 (te->mtype != TlbEntry::MemoryType::Normal)) {
1131 // Unaligned accesses to Device memory should always cause an
1132 // abort regardless of sctlr.a
1133 alignFaults++;
1134 return std::make_shared<DataAbort>(
1135 vaddr_tainted,
1136 TlbEntry::DomainType::NoAccess, is_write,
1137 ArmFault::AlignmentFault, isStage2,
1138 tranMethod);
1139 }
1140
1141 // Check for a trickbox generated address fault
1142 if (fault == NoFault)
1143 fault = testTranslation(req, mode, te->domain);
1144 }
1145
1146 if (fault == NoFault) {
1147 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1148 if (aarch64 && is_fetch && cpsr.il == 1) {
1149 return std::make_shared<IllegalInstSetStateFault>();
1150 }
1151
1152 // Don't try to finalize a physical address unless the
1153 // translation has completed (i.e., there is a table entry).
1154 return te ? finalizePhysical(req, tc, mode) : NoFault;
1155 } else {
1156 return fault;
1157 }
1158}
1159
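// Atomic (blocking) translation entry point: forwards directly to the
// stage 2 TLB when directToStage2 is set, otherwise performs a non-timing
// translateFs/translateSe call which must complete without delay.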
1160Fault
1162TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
1162 TLB::ArmTranslationType tranType)
1163{
1164 updateMiscReg(tc, tranType);
1165
1166 if (directToStage2) {
1167 assert(stage2Tlb);
1168 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1169 }
1170
1171 bool delay = false;
1172 Fault fault;
1173 if (FullSystem)
1174 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1175 else
1176 fault = translateSe(req, tc, mode, NULL, delay, false);
1177 assert(!delay);
1178 return fault;
1179}
1180
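// Functional translation entry point: same flow as the atomic path, but
// with the functional flag set so any required table walk is performed
// functionally.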
1181Fault
1183TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
1183 TLB::ArmTranslationType tranType)
1184{
1185 updateMiscReg(tc, tranType);
1186
1187 if (directToStage2) {
1188 assert(stage2Tlb);
1189 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1190 }
1191
1192 bool delay = false;
1193 Fault fault;
1194 if (FullSystem)
1195 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1196 else
1197 fault = translateSe(req, tc, mode, NULL, delay, false);
1198 assert(!delay);
1199 return fault;
1200}
1201
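// Timing translation entry point: forwards to the stage 2 TLB when
// directToStage2 is set, otherwise defers to translateComplete() which
// reports the outcome through the Translation interface.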
1202void
1204TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
1204 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1205{
1206 updateMiscReg(tc, tranType);
1207
1208 if (directToStage2) {
1209 assert(stage2Tlb);
1210 stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1211 return;
1212 }
1213
1214 assert(translation);
1215
1216 translateComplete(req, tc, translation, mode, tranType, isStage2);
1217}
1218
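// Performs the actual translation for the timing path and notifies the
// Translation object of the result (finish or markDelayed), except while
// an intermediate stage 2 lookup is still outstanding (see comment below).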
1219Fault
1221TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
1221 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1222 bool callFromS2)
1223{
1224 bool delay = false;
1225 Fault fault;
1226 if (FullSystem)
1227 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1228 else
1229 fault = translateSe(req, tc, mode, translation, delay, true);
1230 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1231 NoFault);
1232    // If we have a translation and we're not in the middle of doing a
1233    // stage 2 translation, tell the translation that we've either finished
1234    // or it's going to take a while. By not doing this when we're in the
1235    // middle of a stage 2 translation we prevent marking the translation as
1236    // delayed twice: once when the translation starts and again when the
1237    // stage 1 translation completes.
1238 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1239 if (!delay)
1240 translation->finish(fault, req, tc, mode);
1241 else
1242 translation->markDelayed();
1243 }
1244 return fault;
1245}
1246
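// The port used for table-walk memory accesses is owned by the shared
// stage 2 MMU.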
1247BaseMasterPort*
1248TLB::getMasterPort()
1249{
1250 return &stage2Mmu->getPort();
1251}
1252
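// Cache the system registers that control translation (SCTLR, TCR/TTBCR,
// SCR, HCR, PRRR/NMRR, DACR, ASID, VMID, ...) for the current context and
// translation type, so they don't have to be re-read on every access.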
1253void
1254TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1255{
1256 // check if the regs have changed, or the translation mode is different.
1257 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1258    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1259 if (miscRegValid && miscRegContext == tc->contextId() &&
1260 ((tranType == curTranType) || isStage2)) {
1261 return;
1262 }
1263
1264 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1265 cpsr = tc->readMiscReg(MISCREG_CPSR);
1266
1267 // Dependencies: SCR/SCR_EL3, CPSR
1268 isSecure = inSecureState(tc) &&
1269 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1270
1271 aarch64EL = tranTypeEL(cpsr, tranType);
1272 aarch64 = isStage2 ?
1273 ELIs64(tc, EL2) :
1274 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1275
1276 if (aarch64) { // AArch64
1277 // determine EL we need to translate in
1278 switch (aarch64EL) {
1279 case EL0:
1280 case EL1:
1281 {
1282 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1283 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1284 uint64_t ttbr_asid = ttbcr.a1 ?
1285 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1286 tc->readMiscReg(MISCREG_TTBR0_EL1);
1287 asid = bits(ttbr_asid,
1288 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1289 }
1290 break;
1291 case EL2:
1292 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1293 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1294 asid = -1;
1295 break;
1296 case EL3:
1297 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1298 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1299 asid = -1;
1300 break;
1301 }
1302 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1303 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1304 isPriv = aarch64EL != EL0;
1305 if (haveVirtualization) {
1306 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1307 isHyp = tranType & HypMode;
1308 isHyp &= (tranType & S1S2NsTran) == 0;
1309 isHyp &= (tranType & S1CTran) == 0;
1310 // Work out if we should skip the first stage of translation and go
1311 // directly to stage 2. This value is cached so we don't have to
1312 // compute it for every translation.
1313 stage2Req = isStage2 ||
1314 (hcr.vm && !isHyp && !isSecure &&
1315 !(tranType & S1CTran) && (aarch64EL < EL2) &&
1316 !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1317 directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1318 } else {
1319 vmid = 0;
1320 isHyp = false;
1321 directToStage2 = false;
1322 stage2Req = false;
1323 }
1324 } else { // AArch32
1325 sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
1326 !isSecure));
1327 ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
1328 !isSecure));
1329 scr = tc->readMiscReg(MISCREG_SCR);
1330 isPriv = cpsr.mode != MODE_USER;
1331 if (longDescFormatInUse(tc)) {
1332 uint64_t ttbr_asid = tc->readMiscReg(
1333 snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
1334 MISCREG_TTBR0,
1335 tc, !isSecure));
1336 asid = bits(ttbr_asid, 55, 48);
1337 } else { // Short-descriptor translation table format in use
1338 CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1339                MISCREG_CONTEXTIDR, tc, !isSecure));
1340 asid = context_id.asid;
1341 }
1342 prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
1343 !isSecure));
1344 nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
1345 !isSecure));
1346 dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
1347 !isSecure));
1348 hcr = tc->readMiscReg(MISCREG_HCR);
1349
1350 if (haveVirtualization) {
1351 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1352 isHyp = cpsr.mode == MODE_HYP;
1353 isHyp |= tranType & HypMode;
1354 isHyp &= (tranType & S1S2NsTran) == 0;
1355 isHyp &= (tranType & S1CTran) == 0;
1356 if (isHyp) {
1357 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1358 }
1359 // Work out if we should skip the first stage of translation and go
1360 // directly to stage 2. This value is cached so we don't have to
1361 // compute it for every translation.
1362 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1363 !(tranType & S1CTran);
1364 directToStage2 = stage2Req && !sctlr.m;
1365 } else {
1366 vmid = 0;
1367 stage2Req = false;
1368 isHyp = false;
1369 directToStage2 = false;
1370 }
1371 }
1372 miscRegValid = true;
1373 miscRegContext = tc->contextId();
1374 curTranType = tranType;
1375}
1376
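// Map a translation type to the exception level it targets: the explicit
// S1E*/S12E* address-translation types name their EL directly, while the
// remaining types derive it from the current CPSR mode.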
1377ExceptionLevel
1378TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
1379{
1380 switch (type) {
1381 case S1E0Tran:
1382 case S12E0Tran:
1383 return EL0;
1384
1385 case S1E1Tran:
1386 case S12E1Tran:
1387 return EL1;
1388
1389 case S1E2Tran:
1390 return EL2;
1391
1392 case S1E3Tran:
1393 return EL3;
1394
1395 case NormalTran:
1396 case S1CTran:
1397 case S1S2NsTran:
1398 case HypMode:
1399 return opModeToEL((OperatingMode)(uint8_t)cpsr.mode);
1400
1401 default:
1402 panic("Unknown translation mode!\n");
1403 }
1404}
1405
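// Look up the TLB entry for a (possibly tag-stripped) virtual address,
// starting a hardware table walk on a miss. In timing mode the result is
// delivered later by the walker; in atomic/functional mode the entry must
// be present once the walk returns.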
1406Fault
1408TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
1408 Translation *translation, bool timing, bool functional,
1409 bool is_secure, TLB::ArmTranslationType tranType)
1410{
1411 bool is_fetch = (mode == Execute);
1412 bool is_write = (mode == Write);
1413
1414 Addr vaddr_tainted = req->getVaddr();
1415 Addr vaddr = 0;
1416 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1417 if (aarch64) {
1418 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1419 } else {
1420 vaddr = vaddr_tainted;
1421 }
1422 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1423 if (*te == NULL) {
1424 if (req->isPrefetch()) {
1425 // if the request is a prefetch don't attempt to fill the TLB or go
1426 // any further with the memory access (here we can safely use the
1427 // fault status for the short desc. format in all cases)
1428 prefetchFaults++;
1429 return std::make_shared<PrefetchAbort>(
1430 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1431 }
1432
1433 if (is_fetch)
1434 instMisses++;
1435 else if (is_write)
1436 writeMisses++;
1437 else
1438 readMisses++;
1439
1440        // Start the translation table walk, passing variables rather than
1441        // re-retrieving them in the table walker, for speed
1442 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1443 vaddr_tainted, asid, vmid);
1444 Fault fault;
1445 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1446 translation, timing, functional, is_secure,
1447 tranType, stage2Req);
1448        // For timing mode, return and wait for the table walk to finish
1449 if (timing || fault != NoFault) {
1450 return fault;
1451 }
1452
1453 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1454 if (!*te)
1455 printTlb();
1456 assert(*te);
1457 } else {
1458 if (is_fetch)
1459 instHits++;
1460 else if (is_write)
1461 writeHits++;
1462 else
1463 readHits++;
1464 }
1465 return NoFault;
1466}
1467
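// Produce the final TLB entry for a request: a stage 2 entry when this is
// the stage 2 TLB, otherwise the stage 1 entry, merged with the stage 2
// attributes through a Stage2LookUp when a second stage is required.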
1468Fault
1470TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
1471 ThreadContext *tc, Mode mode,
1470 Translation *translation, bool timing, bool functional,
1471 TlbEntry *mergeTe)
1472{
1473 Fault fault;
1474
1475 if (isStage2) {
1476 // We are already in the stage 2 TLB. Grab the table entry for stage
1477 // 2 only. We are here because stage 1 translation is disabled.
1478 TlbEntry *s2Te = NULL;
1479 // Get the stage 2 table entry
1480 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1481 isSecure, curTranType);
1482 // Check permissions of stage 2
1483 if ((s2Te != NULL) && (fault == NoFault)) {
1484 if (aarch64)
1485 fault = checkPermissions64(s2Te, req, mode, tc);
1486 else
1487 fault = checkPermissions(s2Te, req, mode);
1488 }
1489 *te = s2Te;
1490 return fault;
1491 }
1492
1493 TlbEntry *s1Te = NULL;
1494
1495 Addr vaddr_tainted = req->getVaddr();
1496
1497 // Get the stage 1 table entry
1498 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1499 isSecure, curTranType);
1500 // only proceed if we have a valid table entry
1501 if ((s1Te != NULL) && (fault == NoFault)) {
1502 // Check stage 1 permissions before checking stage 2
1503 if (aarch64)
1504 fault = checkPermissions64(s1Te, req, mode, tc);
1505 else
1506 fault = checkPermissions(s1Te, req, mode);
1507        if (stage2Req && (fault == NoFault)) {
1508 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1509 req, translation, mode, timing, functional, curTranType);
1510 fault = s2Lookup->getTe(tc, mergeTe);
1511 if (s2Lookup->isComplete()) {
1512 *te = mergeTe;
1513 // We've finished with the lookup so delete it
1514 delete s2Lookup;
1515 } else {
1516 // The lookup hasn't completed, so we can't delete it now. We
1517 // get round this by asking the object to self delete when the
1518 // translation is complete.
1519 s2Lookup->setSelfDelete();
1520 }
1521 } else {
1522 // This case deals with an S1 hit (or bypass), followed by
1523 // an S2 hit-but-perms issue
1524 if (isStage2) {
1525 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1526 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1527 if (fault != NoFault) {
1528 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1529 armFault->annotate(ArmFault::S1PTW, false);
1530 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1531 }
1532 }
1533 *te = s1Te;
1534 }
1535 }
1536 return fault;
1537}
1538
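// Install (or clear) the TLB test interface consulted by testTranslation()
// and testWalk(), e.g. an external checker such as a trickbox model.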
1539void
1540TLB::setTestInterface(SimObject *_ti)
1541{
1542 if (!_ti) {
1543 test = nullptr;
1544 } else {
1545 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1546 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1547 test = ti;
1548 }
1549}
1550
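// Give the test interface, if present, a chance to flag a fault on a
// completed translation; zero-sized and cache maintenance requests are
// exempt.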
1551Fault
1554TLB::testTranslation(const RequestPtr &req, Mode mode,
1555 TlbEntry::DomainType domain)
1556{
1557 if (!test || !req->hasSize() || req->getSize() == 0 ||
1558 req->isCacheMaintenance()) {
1559 return NoFault;
1560 } else {
1561 return test->translationCheck(req, isPriv, mode, domain);
1562 }
1563}
1564
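// Equivalent check for addresses generated by the table walker itself.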
1565Fault
1566TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1567 TlbEntry::DomainType domain, LookupLevel lookup_level)
1568{
1569 if (!test) {
1570 return NoFault;
1571 } else {
1572 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1573 domain, lookup_level);
1574 }
1575}
1576
1577
1578ArmISA::TLB *
1579ArmTLBParams::create()
1580{
1581 return new ArmISA::TLB(this);
1582}