tlb.cc — comparison of revisions 12763:37c243ed1112 and 13374:b7f652df5e5b
1/*
2 * Copyright (c) 2010-2013, 2016-2018 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45#include "arch/arm/tlb.hh"
46
47#include <memory>
48#include <string>
49#include <vector>
50
51#include "arch/arm/faults.hh"
52#include "arch/arm/pagetable.hh"
53#include "arch/arm/stage2_lookup.hh"
54#include "arch/arm/stage2_mmu.hh"
55#include "arch/arm/system.hh"
56#include "arch/arm/table_walker.hh"
57#include "arch/arm/utility.hh"
58#include "arch/generic/mmapped_ipr.hh"
59#include "base/inifile.hh"
60#include "base/str.hh"
61#include "base/trace.hh"
62#include "cpu/base.hh"
63#include "cpu/thread_context.hh"
64#include "debug/Checkpoint.hh"
65#include "debug/TLB.hh"
66#include "debug/TLBVerbose.hh"
67#include "mem/page_table.hh"
68#include "mem/request.hh"
69#include "params/ArmTLB.hh"
70#include "sim/full_system.hh"
71#include "sim/process.hh"
72
73using namespace std;
74using namespace ArmISA;
75
76TLB::TLB(const ArmTLBParams *p)
77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
78 isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
80 stage2Mmu(NULL), test(nullptr), rangeMRU(1),
81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
82 isHyp(false), asid(0), vmid(0), dacr(0),
83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
84{
85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
86
87 tableWalker->setTlb(this);
88
89 // Cache system-level properties
90 haveLPAE = tableWalker->haveLPAE();
91 haveVirtualization = tableWalker->haveVirtualization();
92 haveLargeAsid64 = tableWalker->haveLargeAsid64();
93
94 if (sys)
95 m5opRange = sys->m5opRange();
96}
97
98TLB::~TLB()
99{
100 delete[] table;
101}
102
103void
104TLB::init()
105{
106 if (stage2Mmu && !isStage2)
107 stage2Tlb = stage2Mmu->stage2Tlb();
108}
109
110void
111TLB::setMMU(Stage2MMU *m, MasterID master_id)
112{
113 stage2Mmu = m;
114 tableWalker->setMMU(m, master_id);
115}
116
117bool
118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
119{
120 updateMiscReg(tc);
121
122 if (directToStage2) {
123 assert(stage2Tlb);
124 return stage2Tlb->translateFunctional(tc, va, pa);
125 }
126
127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
128 aarch64 ? aarch64EL : EL1);
129 if (!e)
130 return false;
131 pa = e->pAddr(va);
132 return true;
133}
134
135Fault
136TLB::finalizePhysical(const RequestPtr &req,
137 ThreadContext *tc, Mode mode) const
138{
139 const Addr paddr = req->getPaddr();
 140
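// Addresses in the m5op range are treated as memory-mapped pseudo-instruction
// accesses: judging from the decode below, bits [15:8] of the physical
// address select the m5op function and bits [7:0] the subfunction.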
141 if (m5opRange.contains(paddr)) {
142 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
143 req->setPaddr(GenericISA::iprAddressPseudoInst(
144 (paddr >> 8) & 0xFF,
145 paddr & 0xFF));
146 }
147
148 return NoFault;
149}
150
151TlbEntry*
152TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
153 bool functional, bool ignore_asn, uint8_t target_el)
154{
155
156 TlbEntry *retval = NULL;
157
 158 // Linear search of the table, maintaining an LRU ordering on a hit
159 int x = 0;
160 while (retval == NULL && x < size) {
161 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
162 target_el)) ||
163 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
 164 // Only promote the hit entry towards the MRU position when it sits
 165 // beyond rangeMRU; functional lookups never reorder the table
166 if (x > rangeMRU && !functional) {
167 TlbEntry tmp_entry = table[x];
168 for (int i = x; i > 0; i--)
169 table[i] = table[i - 1];
170 table[0] = tmp_entry;
171 retval = &table[0];
172 } else {
173 retval = &table[x];
174 }
175 break;
176 }
177 ++x;
178 }
179
180 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
181 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
182 "el: %d\n",
183 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
184 retval ? retval->pfn : 0, retval ? retval->size : 0,
185 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
186 retval ? retval->ns : 0, retval ? retval->nstid : 0,
187 retval ? retval->global : 0, retval ? retval->asid : 0,
188 retval ? retval->el : 0);
189
190 return retval;
191}
192
 193// Insert a new TLB entry
194void
195TLB::insert(Addr addr, TlbEntry &entry)
196{
197 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
198 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
199 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
200 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
201 entry.global, entry.valid, entry.nonCacheable, entry.xn,
202 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
203 entry.isHyp);
204
205 if (table[size - 1].valid)
206 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
207 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
208 table[size-1].vpn << table[size-1].N, table[size-1].asid,
209 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
210 table[size-1].size, table[size-1].ap, table[size-1].ns,
211 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
212 table[size-1].el);
213
 214 // Insert at the MRU position, evicting the LRU entry
215
216 for (int i = size - 1; i > 0; --i)
217 table[i] = table[i-1];
218 table[0] = entry;
219
220 inserts++;
221 ppRefills->notify(1);
222}
223
224void
225TLB::printTlb() const
226{
227 int x = 0;
228 TlbEntry *te;
229 DPRINTF(TLB, "Current TLB contents:\n");
230 while (x < size) {
231 te = &table[x];
232 if (te->valid)
233 DPRINTF(TLB, " * %s\n", te->print());
234 ++x;
235 }
236}
237
238void
239TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
240{
241 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
242 (secure_lookup ? "secure" : "non-secure"));
243 int x = 0;
244 TlbEntry *te;
245 while (x < size) {
246 te = &table[x];
247 if (te->valid && secure_lookup == !te->nstid &&
248 (te->vmid == vmid || secure_lookup) &&
249 checkELMatch(target_el, te->el, ignore_el)) {
250
251 DPRINTF(TLB, " - %s\n", te->print());
252 te->valid = false;
253 flushedEntries++;
254 }
255 ++x;
256 }
257
258 flushTlb++;
259
260 // If there's a second stage TLB (and we're not it) then flush it as well
261 // if we're currently in hyp mode
262 if (!isStage2 && isHyp) {
263 stage2Tlb->flushAllSecurity(secure_lookup, true);
264 }
265}
266
267void
268TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
269{
270 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
271 (hyp ? "hyp" : "non-hyp"));
272 int x = 0;
273 TlbEntry *te;
274 while (x < size) {
275 te = &table[x];
276 if (te->valid && te->nstid && te->isHyp == hyp &&
277 checkELMatch(target_el, te->el, ignore_el)) {
278
279 DPRINTF(TLB, " - %s\n", te->print());
280 flushedEntries++;
281 te->valid = false;
282 }
283 ++x;
284 }
285
286 flushTlb++;
287
288 // If there's a second stage TLB (and we're not it) then flush it as well
289 if (!isStage2 && !hyp) {
290 stage2Tlb->flushAllNs(false, true);
291 }
292}
293
294void
295TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
296{
297 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
298 "(%s lookup)\n", mva, asn, (secure_lookup ?
299 "secure" : "non-secure"));
300 _flushMva(mva, asn, secure_lookup, false, false, target_el);
301 flushTlbMvaAsid++;
302}
303
304void
305TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
306{
307 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
308 (secure_lookup ? "secure" : "non-secure"));
309
 310 int x = 0;
311 TlbEntry *te;
312
313 while (x < size) {
314 te = &table[x];
315 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
316 (te->vmid == vmid || secure_lookup) &&
317 checkELMatch(target_el, te->el, false)) {
318
319 te->valid = false;
320 DPRINTF(TLB, " - %s\n", te->print());
321 flushedEntries++;
322 }
323 ++x;
324 }
325 flushTlbAsid++;
326}
327
328void
329TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
330{
331 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
332 (secure_lookup ? "secure" : "non-secure"));
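// The ASID argument (0xbeef) is a dummy: ignore_asn is passed as true
// below, so _flushMva() disregards it.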
333 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
334 flushTlbMva++;
335}
336
337void
338TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
339 bool ignore_asn, uint8_t target_el)
340{
341 TlbEntry *te;
342 // D5.7.2: Sign-extend address to 64 bits
343 mva = sext<56>(mva);
344 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
345 target_el);
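// Look up repeatedly, invalidating each match in turn: several entries
// (for example with different ASIDs when ignore_asn is set) can map the
// same MVA, and lookup() returns one match at a time.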
346 while (te != NULL) {
347 if (secure_lookup == !te->nstid) {
348 DPRINTF(TLB, " - %s\n", te->print());
349 te->valid = false;
350 flushedEntries++;
351 }
352 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
353 target_el);
354 }
355}
356
357void
358TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
359{
360 assert(!isStage2);
361 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
362}
363
364bool
365TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
366{
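// Maintenance operations that target EL2 or EL3 match only entries
// belonging to that exact EL; operations targeting EL0/EL1 match entries
// from either EL of that translation regime.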
367 bool elMatch = true;
368 if (!ignore_el) {
369 if (target_el == 2 || target_el == 3) {
370 elMatch = (tentry_el == target_el);
371 } else {
372 elMatch = (tentry_el == 0) || (tentry_el == 1);
373 }
374 }
375 return elMatch;
376}
377
378void
379TLB::drainResume()
380{
381 // We might have unserialized something or switched CPUs, so make
382 // sure to re-read the misc regs.
383 miscRegValid = false;
384}
385
386void
387TLB::takeOverFrom(BaseTLB *_otlb)
388{
389 TLB *otlb = dynamic_cast<TLB*>(_otlb);
390 /* Make sure we actually have a valid type */
391 if (otlb) {
392 _attr = otlb->_attr;
393 haveLPAE = otlb->haveLPAE;
394 directToStage2 = otlb->directToStage2;
395 stage2Req = otlb->stage2Req;
396 stage2DescReq = otlb->stage2DescReq;
396
 397 /* Sync the stage 2 TLBs if they exist in both
 398 * the old CPU and the new one
 399 */
400 if (!isStage2 &&
401 stage2Tlb && otlb->stage2Tlb) {
402 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
403 }
404 } else {
405 panic("Incompatible TLB type!");
406 }
407}
408
409void
410TLB::serialize(CheckpointOut &cp) const
411{
412 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
413
414 SERIALIZE_SCALAR(_attr);
415 SERIALIZE_SCALAR(haveLPAE);
416 SERIALIZE_SCALAR(directToStage2);
417 SERIALIZE_SCALAR(stage2Req);
419 SERIALIZE_SCALAR(stage2DescReq);
418
419 int num_entries = size;
420 SERIALIZE_SCALAR(num_entries);
421 for (int i = 0; i < size; i++)
422 table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
423}
424
425void
426TLB::unserialize(CheckpointIn &cp)
427{
428 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
429
430 UNSERIALIZE_SCALAR(_attr);
431 UNSERIALIZE_SCALAR(haveLPAE);
432 UNSERIALIZE_SCALAR(directToStage2);
433 UNSERIALIZE_SCALAR(stage2Req);
436 UNSERIALIZE_SCALAR(stage2DescReq);
434
435 int num_entries;
436 UNSERIALIZE_SCALAR(num_entries);
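// Only restore as many entries as fit: the checkpoint may have been
// taken with a TLB of a different size.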
437 for (int i = 0; i < min(size, num_entries); i++)
438 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
439}
440
441void
442TLB::regStats()
443{
444 BaseTLB::regStats();
445 instHits
446 .name(name() + ".inst_hits")
447 .desc("ITB inst hits")
448 ;
449
450 instMisses
451 .name(name() + ".inst_misses")
452 .desc("ITB inst misses")
453 ;
454
455 instAccesses
456 .name(name() + ".inst_accesses")
457 .desc("ITB inst accesses")
458 ;
459
460 readHits
461 .name(name() + ".read_hits")
462 .desc("DTB read hits")
463 ;
464
465 readMisses
466 .name(name() + ".read_misses")
467 .desc("DTB read misses")
468 ;
469
470 readAccesses
471 .name(name() + ".read_accesses")
472 .desc("DTB read accesses")
473 ;
474
475 writeHits
476 .name(name() + ".write_hits")
477 .desc("DTB write hits")
478 ;
479
480 writeMisses
481 .name(name() + ".write_misses")
482 .desc("DTB write misses")
483 ;
484
485 writeAccesses
486 .name(name() + ".write_accesses")
487 .desc("DTB write accesses")
488 ;
489
490 hits
491 .name(name() + ".hits")
492 .desc("DTB hits")
493 ;
494
495 misses
496 .name(name() + ".misses")
497 .desc("DTB misses")
498 ;
499
500 accesses
501 .name(name() + ".accesses")
502 .desc("DTB accesses")
503 ;
504
505 flushTlb
506 .name(name() + ".flush_tlb")
507 .desc("Number of times complete TLB was flushed")
508 ;
509
510 flushTlbMva
511 .name(name() + ".flush_tlb_mva")
512 .desc("Number of times TLB was flushed by MVA")
513 ;
514
515 flushTlbMvaAsid
516 .name(name() + ".flush_tlb_mva_asid")
517 .desc("Number of times TLB was flushed by MVA & ASID")
518 ;
519
520 flushTlbAsid
521 .name(name() + ".flush_tlb_asid")
522 .desc("Number of times TLB was flushed by ASID")
523 ;
524
525 flushedEntries
526 .name(name() + ".flush_entries")
527 .desc("Number of entries that have been flushed from TLB")
528 ;
529
530 alignFaults
531 .name(name() + ".align_faults")
532 .desc("Number of TLB faults due to alignment restrictions")
533 ;
534
535 prefetchFaults
536 .name(name() + ".prefetch_faults")
537 .desc("Number of TLB faults due to prefetch")
538 ;
539
540 domainFaults
541 .name(name() + ".domain_faults")
542 .desc("Number of TLB faults due to domain restrictions")
543 ;
544
545 permsFaults
546 .name(name() + ".perms_faults")
547 .desc("Number of TLB faults due to permissions restrictions")
548 ;
549
550 instAccesses = instHits + instMisses;
551 readAccesses = readHits + readMisses;
552 writeAccesses = writeHits + writeMisses;
553 hits = readHits + writeHits + instHits;
554 misses = readMisses + writeMisses + instMisses;
555 accesses = readAccesses + writeAccesses + instAccesses;
556}
557
558void
559TLB::regProbePoints()
560{
561 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
562}
563
564Fault
565TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
566 Translation *translation, bool &delay, bool timing)
567{
568 updateMiscReg(tc);
569 Addr vaddr_tainted = req->getVaddr();
570 Addr vaddr = 0;
571 if (aarch64)
572 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
573 else
574 vaddr = vaddr_tainted;
575 Request::Flags flags = req->getFlags();
576
577 bool is_fetch = (mode == Execute);
578 bool is_write = (mode == Write);
579
580 if (!is_fetch) {
581 assert(flags & MustBeOne);
582 if (sctlr.a || !(flags & AllowUnaligned)) {
583 if (vaddr & mask(flags & AlignmentMask)) {
584 // LPAE is always disabled in SE mode
585 return std::make_shared<DataAbort>(
586 vaddr_tainted,
587 TlbEntry::DomainType::NoAccess, is_write,
588 ArmFault::AlignmentFault, isStage2,
589 ArmFault::VmsaTran);
590 }
591 }
592 }
593
594 Addr paddr;
595 Process *p = tc->getProcessPtr();
596
597 if (!p->pTable->translate(vaddr, paddr))
598 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
599 req->setPaddr(paddr);
600
601 return finalizePhysical(req, tc, mode);
602}
603
604Fault
605TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
606{
 607 // A data cache maintenance instruction that operates by MVA does
 608 // not generate a Data Abort exception due to a Permission fault
609 if (req->isCacheMaintenance()) {
610 return NoFault;
611 }
612
613 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
614 Request::Flags flags = req->getFlags();
615 bool is_fetch = (mode == Execute);
616 bool is_write = (mode == Write);
617 bool is_priv = isPriv && !(flags & UserMode);
618
 619 // Get the translation method from the actual table entry
620 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
621 : ArmFault::VmsaTran;
622
623 // If this is the second stage of translation and the request is for a
624 // stage 1 page table walk then we need to check the HCR.PTW bit. This
625 // allows us to generate a fault if the request targets an area marked
626 // as a device or strongly ordered.
627 if (isStage2 && req->isPTWalk() && hcr.ptw &&
628 (te->mtype != TlbEntry::MemoryType::Normal)) {
629 return std::make_shared<DataAbort>(
630 vaddr, te->domain, is_write,
631 ArmFault::PermissionLL + te->lookupLevel,
632 isStage2, tranMethod);
633 }
634
635 // Generate an alignment fault for unaligned data accesses to device or
636 // strongly ordered memory
637 if (!is_fetch) {
638 if (te->mtype != TlbEntry::MemoryType::Normal) {
639 if (vaddr & mask(flags & AlignmentMask)) {
640 alignFaults++;
641 return std::make_shared<DataAbort>(
642 vaddr, TlbEntry::DomainType::NoAccess, is_write,
643 ArmFault::AlignmentFault, isStage2,
644 tranMethod);
645 }
646 }
647 }
648
649 if (te->nonCacheable) {
650 // Prevent prefetching from I/O devices.
651 if (req->isPrefetch()) {
652 // Here we can safely use the fault status for the short
653 // desc. format in all cases
654 return std::make_shared<PrefetchAbort>(
655 vaddr, ArmFault::PrefetchUncacheable,
656 isStage2, tranMethod);
657 }
658 }
659
660 if (!te->longDescFormat) {
661 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
662 case 0:
663 domainFaults++;
664 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
665 " domain: %#x write:%d\n", dacr,
666 static_cast<uint8_t>(te->domain), is_write);
667 if (is_fetch) {
668 // Use PC value instead of vaddr because vaddr might
669 // be aligned to cache line and should not be the
670 // address reported in FAR
671 return std::make_shared<PrefetchAbort>(
672 req->getPC(),
673 ArmFault::DomainLL + te->lookupLevel,
674 isStage2, tranMethod);
675 } else
676 return std::make_shared<DataAbort>(
677 vaddr, te->domain, is_write,
678 ArmFault::DomainLL + te->lookupLevel,
679 isStage2, tranMethod);
680 case 1:
681 // Continue with permissions check
682 break;
683 case 2:
684 panic("UNPRED domain\n");
685 case 3:
686 return NoFault;
687 }
688 }
689
 690 // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
691 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
692 uint8_t hap = te->hap;
693
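// With SCTLR.AFE set (or the long-descriptor format) AP[0] serves as an
// access flag rather than a permission bit, so treat it as set for the
// checks below.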
694 if (sctlr.afe == 1 || te->longDescFormat)
695 ap |= 1;
696
697 bool abt;
698 bool isWritable = true;
 699 // If this is a stage 2 access (e.g. for reading stage 1 page table
 700 // entries) then don't perform the AP permissions check; we still do
 701 // the HAP check below.
702 if (isStage2) {
703 abt = false;
704 } else {
705 switch (ap) {
706 case 0:
707 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
708 (int)sctlr.rs);
709 if (!sctlr.xp) {
710 switch ((int)sctlr.rs) {
711 case 2:
712 abt = is_write;
713 break;
714 case 1:
715 abt = is_write || !is_priv;
716 break;
717 case 0:
718 case 3:
719 default:
720 abt = true;
721 break;
722 }
723 } else {
724 abt = true;
725 }
726 break;
727 case 1:
728 abt = !is_priv;
729 break;
730 case 2:
731 abt = !is_priv && is_write;
732 isWritable = is_priv;
733 break;
734 case 3:
735 abt = false;
736 break;
737 case 4:
738 panic("UNPRED premissions\n");
739 case 5:
740 abt = !is_priv || is_write;
741 isWritable = false;
742 break;
743 case 6:
744 case 7:
745 abt = is_write;
746 isWritable = false;
747 break;
748 default:
749 panic("Unknown permissions %#x\n", ap);
750 }
751 }
752
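// HAP holds the stage 2 (hypervisor) access permissions: bit 0 grants
// reads, bit 1 grants writes.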
753 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
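// SCTLR.WXN forces writable memory to be execute-never; SCTLR.UWXN does
// the same for privileged execution from regions that are writable at
// EL0 (AP == 3).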
754 bool xn = te->xn || (isWritable && sctlr.wxn) ||
755 (ap == 3 && sctlr.uwxn && is_priv);
756 if (is_fetch && (abt || xn ||
757 (te->longDescFormat && te->pxn && is_priv) ||
758 (isSecure && te->ns && scr.sif))) {
759 permsFaults++;
760 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
761 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
762 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
763 // Use PC value instead of vaddr because vaddr might be aligned to
764 // cache line and should not be the address reported in FAR
765 return std::make_shared<PrefetchAbort>(
766 req->getPC(),
767 ArmFault::PermissionLL + te->lookupLevel,
768 isStage2, tranMethod);
769 } else if (abt | hapAbt) {
770 permsFaults++;
771 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
772 " write:%d\n", ap, is_priv, is_write);
773 return std::make_shared<DataAbort>(
774 vaddr, te->domain, is_write,
775 ArmFault::PermissionLL + te->lookupLevel,
776 isStage2 | !abt, tranMethod);
777 }
778 return NoFault;
779}
780
781
782Fault
783TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
784 ThreadContext *tc)
785{
786 assert(aarch64);
787
788 // A data cache maintenance instruction that operates by VA does
789 // not generate a Permission fault unless:
790 // * It is a data cache invalidate (dc ivac) which requires write
791 // permissions to the VA, or
792 // * It is executed from EL0
793 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
794 return NoFault;
795 }
796
797 Addr vaddr_tainted = req->getVaddr();
798 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
799
800 Request::Flags flags = req->getFlags();
801 bool is_fetch = (mode == Execute);
802 // Cache clean operations require read permissions to the specified VA
803 bool is_write = !req->isCacheClean() && mode == Write;
804 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
805
806 updateMiscReg(tc, curTranType);
807
808 // If this is the second stage of translation and the request is for a
809 // stage 1 page table walk then we need to check the HCR.PTW bit. This
810 // allows us to generate a fault if the request targets an area marked
811 // as a device or strongly ordered.
812 if (isStage2 && req->isPTWalk() && hcr.ptw &&
813 (te->mtype != TlbEntry::MemoryType::Normal)) {
814 return std::make_shared<DataAbort>(
815 vaddr_tainted, te->domain, is_write,
816 ArmFault::PermissionLL + te->lookupLevel,
817 isStage2, ArmFault::LpaeTran);
818 }
819
820 // Generate an alignment fault for unaligned accesses to device or
821 // strongly ordered memory
822 if (!is_fetch) {
823 if (te->mtype != TlbEntry::MemoryType::Normal) {
824 if (vaddr & mask(flags & AlignmentMask)) {
825 alignFaults++;
826 return std::make_shared<DataAbort>(
827 vaddr_tainted,
828 TlbEntry::DomainType::NoAccess, is_write,
829 ArmFault::AlignmentFault, isStage2,
830 ArmFault::LpaeTran);
831 }
832 }
833 }
834
835 if (te->nonCacheable) {
836 // Prevent prefetching from I/O devices.
837 if (req->isPrefetch()) {
838 // Here we can safely use the fault status for the short
839 // desc. format in all cases
840 return std::make_shared<PrefetchAbort>(
841 vaddr_tainted,
842 ArmFault::PrefetchUncacheable,
843 isStage2, ArmFault::LpaeTran);
844 }
845 }
846
847 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
848 bool grant = false;
849
850 uint8_t xn = te->xn;
851 uint8_t pxn = te->pxn;
852 bool r = !is_write && !is_fetch;
853 bool w = is_write;
854 bool x = is_fetch;
855 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
856 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
857
858 if (isStage2) {
859 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
860 // In stage 2 we use the hypervisor access permission bits.
861 // The following permissions are described in ARM DDI 0487A.f
862 // D4-1802
863 uint8_t hap = 0x3 & te->hap;
864 if (is_fetch) {
865 // sctlr.wxn overrides the xn bit
866 grant = !sctlr.wxn && !xn;
867 } else if (is_write) {
868 grant = hap & 0x2;
869 } else { // is_read
870 grant = hap & 0x1;
871 }
872 } else {
873 switch (aarch64EL) {
874 case EL0:
875 {
876 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
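// perm packs {AP[2:1], XN, PXN} into four bits; the cases below follow
// the AArch64 stage 1 EL0/EL1 permission model.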
877 switch (perm) {
878 case 0:
879 case 1:
880 case 8:
881 case 9:
882 grant = x;
883 break;
884 case 4:
885 case 5:
886 grant = r || w || (x && !sctlr.wxn);
887 break;
888 case 6:
889 case 7:
890 grant = r || w;
891 break;
892 case 12:
893 case 13:
894 grant = r || x;
895 break;
896 case 14:
897 case 15:
898 grant = r;
899 break;
900 default:
901 grant = false;
902 }
903 }
904 break;
905 case EL1:
906 {
907 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
908 switch (perm) {
909 case 0:
910 case 2:
911 grant = r || w || (x && !sctlr.wxn);
912 break;
913 case 1:
914 case 3:
915 case 4:
916 case 5:
917 case 6:
918 case 7:
919 // regions that are writeable at EL0 should not be
920 // executable at EL1
921 grant = r || w;
922 break;
923 case 8:
924 case 10:
925 case 12:
926 case 14:
927 grant = r || x;
928 break;
929 case 9:
930 case 11:
931 case 13:
932 case 15:
933 grant = r;
934 break;
935 default:
936 grant = false;
937 }
938 }
939 break;
940 case EL2:
941 case EL3:
942 {
943 uint8_t perm = (ap & 0x2) | xn;
944 switch (perm) {
945 case 0:
 946 grant = r || w || (x && !sctlr.wxn);
947 break;
948 case 1:
949 grant = r || w;
950 break;
951 case 2:
952 grant = r || x;
953 break;
954 case 3:
955 grant = r;
956 break;
957 default:
958 grant = false;
959 }
960 }
961 break;
962 }
963 }
964
965 if (!grant) {
966 if (is_fetch) {
967 permsFaults++;
968 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
969 "AP:%d priv:%d write:%d ns:%d sif:%d "
970 "sctlr.afe: %d\n",
971 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
972 // Use PC value instead of vaddr because vaddr might be aligned to
973 // cache line and should not be the address reported in FAR
974 return std::make_shared<PrefetchAbort>(
975 req->getPC(),
976 ArmFault::PermissionLL + te->lookupLevel,
977 isStage2, ArmFault::LpaeTran);
978 } else {
979 permsFaults++;
980 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
981 "priv:%d write:%d\n", ap, is_priv, is_write);
982 return std::make_shared<DataAbort>(
983 vaddr_tainted, te->domain, is_write,
984 ArmFault::PermissionLL + te->lookupLevel,
985 isStage2, ArmFault::LpaeTran);
986 }
987 }
988
989 return NoFault;
990}
991
992Fault
993TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
994 Translation *translation, bool &delay, bool timing,
995 TLB::ArmTranslationType tranType, bool functional)
996{
997 // No such thing as a functional timing access
998 assert(!(timing && functional));
999
1000 updateMiscReg(tc, tranType);
1001
1002 Addr vaddr_tainted = req->getVaddr();
1003 Addr vaddr = 0;
1004 if (aarch64)
1005 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
1006 else
1007 vaddr = vaddr_tainted;
1008 Request::Flags flags = req->getFlags();
1009
1010 bool is_fetch = (mode == Execute);
1011 bool is_write = (mode == Write);
1012 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
1013 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
1014 : ArmFault::VmsaTran;
1015
1016 req->setAsid(asid);
1017
1018 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1019 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1020
1021 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1022 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1023 scr, sctlr, flags, tranType);
1024
1025 if ((req->isInstFetch() && (!sctlr.i)) ||
 1026 ((!req->isInstFetch()) && (!sctlr.c))) {
1027 if (!req->isCacheMaintenance()) {
1028 req->setFlags(Request::UNCACHEABLE);
1029 }
1030 req->setFlags(Request::STRICT_ORDER);
1031 }
1032 if (!is_fetch) {
1033 assert(flags & MustBeOne);
1034 if (sctlr.a || !(flags & AllowUnaligned)) {
1035 if (vaddr & mask(flags & AlignmentMask)) {
1036 alignFaults++;
1037 return std::make_shared<DataAbort>(
1038 vaddr_tainted,
1039 TlbEntry::DomainType::NoAccess, is_write,
1040 ArmFault::AlignmentFault, isStage2,
1041 tranMethod);
1042 }
1043 }
1044 }
1045
1046 // If guest MMU is off or hcr.vm=0 go straight to stage2
1047 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1048
1049 req->setPaddr(vaddr);
1050 // When the MMU is off the security attribute corresponds to the
1051 // security state of the processor
1052 if (isSecure)
1053 req->setFlags(Request::SECURE);
1054
1055 // @todo: double check this (ARM ARM issue C B3.2.1)
1056 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1057 nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1058 if (!req->isCacheMaintenance()) {
1059 req->setFlags(Request::UNCACHEABLE);
1060 }
1061 req->setFlags(Request::STRICT_ORDER);
1062 }
1063
1064 // Set memory attributes
1065 TlbEntry temp_te;
1066 temp_te.ns = !isSecure;
1067 if (isStage2 || hcr.dc == 0 || isSecure ||
1068 (isHyp && !(tranType & S1CTran))) {
1069
1070 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1071 : TlbEntry::MemoryType::StronglyOrdered;
1072 temp_te.innerAttrs = 0x0;
1073 temp_te.outerAttrs = 0x0;
1074 temp_te.shareable = true;
1075 temp_te.outerShareable = true;
1076 } else {
1077 temp_te.mtype = TlbEntry::MemoryType::Normal;
1078 temp_te.innerAttrs = 0x3;
1079 temp_te.outerAttrs = 0x3;
1080 temp_te.shareable = false;
1081 temp_te.outerShareable = false;
1082 }
1083 temp_te.setAttributes(long_desc_format);
1084 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1085 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1086 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1087 isStage2);
1088 setAttr(temp_te.attributes);
1089
1090 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1091 }
1092
1093 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1094 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1095 // Translation enabled
1096
1097 TlbEntry *te = NULL;
1098 TlbEntry mergeTe;
1099 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1100 functional, &mergeTe);
1101 // only proceed if we have a valid table entry
1102 if ((te == NULL) && (fault == NoFault)) delay = true;
1103
1104 // If we have the table entry transfer some of the attributes to the
1105 // request that triggered the translation
1106 if (te != NULL) {
1107 // Set memory attributes
1108 DPRINTF(TLBVerbose,
1109 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1110 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1111 te->shareable, te->innerAttrs, te->outerAttrs,
1112 static_cast<uint8_t>(te->mtype), isStage2);
1113 setAttr(te->attributes);
1114
1115 if (te->nonCacheable && !req->isCacheMaintenance())
1116 req->setFlags(Request::UNCACHEABLE);
1117
1118 // Require requests to be ordered if the request goes to
1119 // strongly ordered or device memory (i.e., anything other
1120 // than normal memory requires strict order).
1121 if (te->mtype != TlbEntry::MemoryType::Normal)
1122 req->setFlags(Request::STRICT_ORDER);
1123
1124 Addr pa = te->pAddr(vaddr);
1125 req->setPaddr(pa);
1126
1127 if (isSecure && !te->ns) {
1128 req->setFlags(Request::SECURE);
1129 }
1130 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1131 (te->mtype != TlbEntry::MemoryType::Normal)) {
1132 // Unaligned accesses to Device memory should always cause an
1133 // abort regardless of sctlr.a
1134 alignFaults++;
1135 return std::make_shared<DataAbort>(
1136 vaddr_tainted,
1137 TlbEntry::DomainType::NoAccess, is_write,
1138 ArmFault::AlignmentFault, isStage2,
1139 tranMethod);
1140 }
1141
1142 // Check for a trickbox generated address fault
1143 if (fault == NoFault)
1144 fault = testTranslation(req, mode, te->domain);
1145 }
1146
1147 if (fault == NoFault) {
1148 // Don't try to finalize a physical address unless the
1149 // translation has completed (i.e., there is a table entry).
1150 return te ? finalizePhysical(req, tc, mode) : NoFault;
1151 } else {
1152 return fault;
1153 }
1154}
1155
1156Fault
1157TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
1158 TLB::ArmTranslationType tranType)
1159{
1160 updateMiscReg(tc, tranType);
1161
1162 if (directToStage2) {
1163 assert(stage2Tlb);
1164 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1165 }
1166
1167 bool delay = false;
1168 Fault fault;
1169 if (FullSystem)
1170 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1171 else
1172 fault = translateSe(req, tc, mode, NULL, delay, false);
1173 assert(!delay);
1174 return fault;
1175}
1176
1177Fault
1178TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
1179 TLB::ArmTranslationType tranType)
1180{
1181 updateMiscReg(tc, tranType);
1182
1183 if (directToStage2) {
1184 assert(stage2Tlb);
1185 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1186 }
1187
1188 bool delay = false;
1189 Fault fault;
1190 if (FullSystem)
1191 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1192 else
1193 fault = translateSe(req, tc, mode, NULL, delay, false);
1194 assert(!delay);
1195 return fault;
1196}
1197
1198void
1199TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
1200 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1201{
1202 updateMiscReg(tc, tranType);
1203
1204 if (directToStage2) {
1205 assert(stage2Tlb);
1206 stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1207 return;
1208 }
1209
1210 assert(translation);
1211
1212 translateComplete(req, tc, translation, mode, tranType, isStage2);
1213}
1214
1215Fault
1216TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
1217 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1218 bool callFromS2)
1219{
1220 bool delay = false;
1221 Fault fault;
1222 if (FullSystem)
1223 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1224 else
1225 fault = translateSe(req, tc, mode, translation, delay, true);
 1226 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n",
 1227 delay, fault != NoFault);
 1228 // If we have a translation, and we're not in the middle of doing a stage
 1229 // 2 translation, tell the translation that we've either finished or it's
 1230 // going to take a while. By not doing this when we're in the middle of a
 1231 // stage 2 translation we prevent marking the translation as delayed twice,
 1232 // once when the translation starts and again when the stage 1 translation
 1233 // completes.
1234 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1235 if (!delay)
1236 translation->finish(fault, req, tc, mode);
1237 else
1238 translation->markDelayed();
1239 }
1240 return fault;
1241}
1242
1243BaseMasterPort*
1244TLB::getMasterPort()
1245{
1246 return &stage2Mmu->getPort();
1247}
1248
1249void
1250TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1251{
 1252 // Check if the regs have changed, or the translation mode is different.
 1253 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
 1254 // one type of translation anyway
1255 if (miscRegValid && miscRegContext == tc->contextId() &&
1256 ((tranType == curTranType) || isStage2)) {
1257 return;
1258 }
1259
1260 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1261 cpsr = tc->readMiscReg(MISCREG_CPSR);
1262
1263 // Dependencies: SCR/SCR_EL3, CPSR
1264 isSecure = inSecureState(tc) &&
1265 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1266
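// Stage 2 translations always belong to the EL2 regime, so their
// AArch64-ness follows EL2; otherwise it follows the EL being translated
// in, with EL0 using the EL1 regime.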
1267 aarch64EL = tranTypeEL(cpsr, tranType);
1268 aarch64 = isStage2 ?
1269 ELIs64(tc, EL2) :
1270 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1271
1272 if (aarch64) { // AArch64
 1273 // Determine the EL in which we need to translate
1274 switch (aarch64EL) {
1275 case EL0:
1276 case EL1:
1277 {
1278 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1279 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1280 uint64_t ttbr_asid = ttbcr.a1 ?
1281 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1282 tc->readMiscReg(MISCREG_TTBR0_EL1);
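// The ASID is held in TTBRx_EL1[63:48]: 16 bits when large ASIDs are
// implemented and TCR_EL1.AS is set, otherwise just bits [55:48].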
1283 asid = bits(ttbr_asid,
1284 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1285 }
1286 break;
1287 case EL2:
1288 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1289 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1290 asid = -1;
1291 break;
1292 case EL3:
1293 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1294 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1295 asid = -1;
1296 break;
1297 }
1298 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1299 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1300 isPriv = aarch64EL != EL0;
1301 if (haveVirtualization) {
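// The VMID is taken from VTTBR_EL2[55:48] (8-bit VMIDs assumed here).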
1302 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1303 isHyp = tranType & HypMode;
1304 isHyp &= (tranType & S1S2NsTran) == 0;
1305 isHyp &= (tranType & S1CTran) == 0;
1306 // Work out if we should skip the first stage of translation and go
1307 // directly to stage 2. This value is cached so we don't have to
1308 // compute it for every translation.
1309 stage2Req = isStage2 ||
1310 (hcr.vm && !isHyp && !isSecure &&
1311 !(tranType & S1CTran) && (aarch64EL < EL2) &&
1312 !(tranType & S1E1Tran)); // <--- FIX THIS HACK
437
438 int num_entries;
439 UNSERIALIZE_SCALAR(num_entries);
440 for (int i = 0; i < min(size, num_entries); i++)
441 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
442}
443
444void
445TLB::regStats()
446{
447 BaseTLB::regStats();
448 instHits
449 .name(name() + ".inst_hits")
450 .desc("ITB inst hits")
451 ;
452
453 instMisses
454 .name(name() + ".inst_misses")
455 .desc("ITB inst misses")
456 ;
457
458 instAccesses
459 .name(name() + ".inst_accesses")
460 .desc("ITB inst accesses")
461 ;
462
463 readHits
464 .name(name() + ".read_hits")
465 .desc("DTB read hits")
466 ;
467
468 readMisses
469 .name(name() + ".read_misses")
470 .desc("DTB read misses")
471 ;
472
473 readAccesses
474 .name(name() + ".read_accesses")
475 .desc("DTB read accesses")
476 ;
477
478 writeHits
479 .name(name() + ".write_hits")
480 .desc("DTB write hits")
481 ;
482
483 writeMisses
484 .name(name() + ".write_misses")
485 .desc("DTB write misses")
486 ;
487
488 writeAccesses
489 .name(name() + ".write_accesses")
490 .desc("DTB write accesses")
491 ;
492
493 hits
494 .name(name() + ".hits")
495 .desc("DTB hits")
496 ;
497
498 misses
499 .name(name() + ".misses")
500 .desc("DTB misses")
501 ;
502
503 accesses
504 .name(name() + ".accesses")
505 .desc("DTB accesses")
506 ;
507
508 flushTlb
509 .name(name() + ".flush_tlb")
510 .desc("Number of times complete TLB was flushed")
511 ;
512
513 flushTlbMva
514 .name(name() + ".flush_tlb_mva")
515 .desc("Number of times TLB was flushed by MVA")
516 ;
517
518 flushTlbMvaAsid
519 .name(name() + ".flush_tlb_mva_asid")
520 .desc("Number of times TLB was flushed by MVA & ASID")
521 ;
522
523 flushTlbAsid
524 .name(name() + ".flush_tlb_asid")
525 .desc("Number of times TLB was flushed by ASID")
526 ;
527
528 flushedEntries
529 .name(name() + ".flush_entries")
530 .desc("Number of entries that have been flushed from TLB")
531 ;
532
533 alignFaults
534 .name(name() + ".align_faults")
535 .desc("Number of TLB faults due to alignment restrictions")
536 ;
537
538 prefetchFaults
539 .name(name() + ".prefetch_faults")
540 .desc("Number of TLB faults due to prefetch")
541 ;
542
543 domainFaults
544 .name(name() + ".domain_faults")
545 .desc("Number of TLB faults due to domain restrictions")
546 ;
547
548 permsFaults
549 .name(name() + ".perms_faults")
550 .desc("Number of TLB faults due to permissions restrictions")
551 ;
552
553 instAccesses = instHits + instMisses;
554 readAccesses = readHits + readMisses;
555 writeAccesses = writeHits + writeMisses;
556 hits = readHits + writeHits + instHits;
557 misses = readMisses + writeMisses + instMisses;
558 accesses = readAccesses + writeAccesses + instAccesses;
559}
560
561void
562TLB::regProbePoints()
563{
564 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
565}
566
567Fault
568TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
569 Translation *translation, bool &delay, bool timing)
570{
571 updateMiscReg(tc);
572 Addr vaddr_tainted = req->getVaddr();
573 Addr vaddr = 0;
574 if (aarch64)
575 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
576 else
577 vaddr = vaddr_tainted;
578 Request::Flags flags = req->getFlags();
579
580 bool is_fetch = (mode == Execute);
581 bool is_write = (mode == Write);
582
583 if (!is_fetch) {
584 assert(flags & MustBeOne);
585 if (sctlr.a || !(flags & AllowUnaligned)) {
586 if (vaddr & mask(flags & AlignmentMask)) {
587 // LPAE is always disabled in SE mode
588 return std::make_shared<DataAbort>(
589 vaddr_tainted,
590 TlbEntry::DomainType::NoAccess, is_write,
591 ArmFault::AlignmentFault, isStage2,
592 ArmFault::VmsaTran);
593 }
594 }
595 }
596
597 Addr paddr;
598 Process *p = tc->getProcessPtr();
599
600 if (!p->pTable->translate(vaddr, paddr))
601 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
602 req->setPaddr(paddr);
603
604 return finalizePhysical(req, tc, mode);
605}
606
607Fault
608TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
609{
610 // a data cache maintenance instruction that operates by MVA does
611 // not generate a Data Abort exeception due to a Permission fault
612 if (req->isCacheMaintenance()) {
613 return NoFault;
614 }
615
616 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
617 Request::Flags flags = req->getFlags();
618 bool is_fetch = (mode == Execute);
619 bool is_write = (mode == Write);
620 bool is_priv = isPriv && !(flags & UserMode);
621
622 // Get the translation type from the actuall table entry
623 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
624 : ArmFault::VmsaTran;
625
626 // If this is the second stage of translation and the request is for a
627 // stage 1 page table walk then we need to check the HCR.PTW bit. This
628 // allows us to generate a fault if the request targets an area marked
629 // as a device or strongly ordered.
630 if (isStage2 && req->isPTWalk() && hcr.ptw &&
631 (te->mtype != TlbEntry::MemoryType::Normal)) {
632 return std::make_shared<DataAbort>(
633 vaddr, te->domain, is_write,
634 ArmFault::PermissionLL + te->lookupLevel,
635 isStage2, tranMethod);
636 }
637
638 // Generate an alignment fault for unaligned data accesses to device or
639 // strongly ordered memory
640 if (!is_fetch) {
641 if (te->mtype != TlbEntry::MemoryType::Normal) {
642 if (vaddr & mask(flags & AlignmentMask)) {
643 alignFaults++;
644 return std::make_shared<DataAbort>(
645 vaddr, TlbEntry::DomainType::NoAccess, is_write,
646 ArmFault::AlignmentFault, isStage2,
647 tranMethod);
648 }
649 }
650 }
651
652 if (te->nonCacheable) {
653 // Prevent prefetching from I/O devices.
654 if (req->isPrefetch()) {
655 // Here we can safely use the fault status for the short
656 // desc. format in all cases
657 return std::make_shared<PrefetchAbort>(
658 vaddr, ArmFault::PrefetchUncacheable,
659 isStage2, tranMethod);
660 }
661 }
662
663 if (!te->longDescFormat) {
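// Short-descriptor format: each of the 16 domains owns a 2-bit
// field in DACR. 0b00 denies all access (fault below), 0b01 is a
// client domain (the AP bits are checked), 0b10 is reserved and
// UNPREDICTABLE, and 0b11 is a manager domain (no permission
// checks). E.g. domain 5 is controlled by DACR[11:10].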
664 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
665 case 0:
666 domainFaults++;
667 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
668 " domain: %#x write:%d\n", dacr,
669 static_cast<uint8_t>(te->domain), is_write);
670 if (is_fetch) {
671 // Use PC value instead of vaddr because vaddr might
672 // be aligned to cache line and should not be the
673 // address reported in FAR
674 return std::make_shared<PrefetchAbort>(
675 req->getPC(),
676 ArmFault::DomainLL + te->lookupLevel,
677 isStage2, tranMethod);
678 } else
679 return std::make_shared<DataAbort>(
680 vaddr, te->domain, is_write,
681 ArmFault::DomainLL + te->lookupLevel,
682 isStage2, tranMethod);
683 case 1:
684 // Continue with permissions check
685 break;
686 case 2:
687 panic("UNPRED domain\n");
688 case 3:
689 return NoFault;
690 }
691 }
692
693 // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
694 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
695 uint8_t hap = te->hap;
696
697 if (sctlr.afe == 1 || te->longDescFormat)
698 ap |= 1;
699
700 bool abt;
701 bool isWritable = true;
702 // If this is a stage 2 access (e.g. for reading stage 1 page table
703 // entries) then don't perform the AP permissions check; we still do
704 // the HAP check below.
705 if (isStage2) {
706 abt = false;
707 } else {
708 switch (ap) {
709 case 0:
710 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
711 (int)sctlr.rs);
712 if (!sctlr.xp) {
713 switch ((int)sctlr.rs) {
714 case 2:
715 abt = is_write;
716 break;
717 case 1:
718 abt = is_write || !is_priv;
719 break;
720 case 0:
721 case 3:
722 default:
723 abt = true;
724 break;
725 }
726 } else {
727 abt = true;
728 }
729 break;
730 case 1:
731 abt = !is_priv;
732 break;
733 case 2:
734 abt = !is_priv && is_write;
735 isWritable = is_priv;
736 break;
737 case 3:
738 abt = false;
739 break;
740 case 4:
741 panic("UNPRED permissions\n");
742 case 5:
743 abt = !is_priv || is_write;
744 isWritable = false;
745 break;
746 case 6:
747 case 7:
748 abt = is_write;
749 isWritable = false;
750 break;
751 default:
752 panic("Unknown permissions %#x\n", ap);
753 }
754 }
755
756 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
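// SCTLR.WXN makes any writable region execute-never; SCTLR.UWXN
// additionally makes regions writable at user level (AP == 3)
// execute-never for privileged execution.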
757 bool xn = te->xn || (isWritable && sctlr.wxn) ||
758 (ap == 3 && sctlr.uwxn && is_priv);
759 if (is_fetch && (abt || xn ||
760 (te->longDescFormat && te->pxn && is_priv) ||
761 (isSecure && te->ns && scr.sif))) {
762 permsFaults++;
763 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
764 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
765 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
766 // Use PC value instead of vaddr because vaddr might be aligned to
767 // cache line and should not be the address reported in FAR
768 return std::make_shared<PrefetchAbort>(
769 req->getPC(),
770 ArmFault::PermissionLL + te->lookupLevel,
771 isStage2, tranMethod);
772 } else if (abt || hapAbt) {
773 permsFaults++;
774 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
775 " write:%d\n", ap, is_priv, is_write);
776 return std::make_shared<DataAbort>(
777 vaddr, te->domain, is_write,
778 ArmFault::PermissionLL + te->lookupLevel,
779 isStage2 | !abt, tranMethod);
780 }
781 return NoFault;
782}
783
784
785Fault
786TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
787 ThreadContext *tc)
788{
789 assert(aarch64);
790
791 // A data cache maintenance instruction that operates by VA does
792 // not generate a Permission fault unless:
793 // * It is a data cache invalidate (dc ivac) which requires write
794 // permissions to the VA, or
795 // * It is executed from EL0
796 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
797 return NoFault;
798 }
799
800 Addr vaddr_tainted = req->getVaddr();
801 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
802
803 Request::Flags flags = req->getFlags();
804 bool is_fetch = (mode == Execute);
805 // Cache clean operations require read permissions to the specified VA
806 bool is_write = !req->isCacheClean() && mode == Write;
807 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
808
809 updateMiscReg(tc, curTranType);
810
811 // If this is the second stage of translation and the request is for a
812 // stage 1 page table walk then we need to check the HCR.PTW bit. This
813 // allows us to generate a fault if the request targets an area marked
814 // as a device or strongly ordered.
815 if (isStage2 && req->isPTWalk() && hcr.ptw &&
816 (te->mtype != TlbEntry::MemoryType::Normal)) {
817 return std::make_shared<DataAbort>(
818 vaddr_tainted, te->domain, is_write,
819 ArmFault::PermissionLL + te->lookupLevel,
820 isStage2, ArmFault::LpaeTran);
821 }
822
823 // Generate an alignment fault for unaligned accesses to device or
824 // strongly ordered memory
825 if (!is_fetch) {
826 if (te->mtype != TlbEntry::MemoryType::Normal) {
827 if (vaddr & mask(flags & AlignmentMask)) {
828 alignFaults++;
829 return std::make_shared<DataAbort>(
830 vaddr_tainted,
831 TlbEntry::DomainType::NoAccess, is_write,
832 ArmFault::AlignmentFault, isStage2,
833 ArmFault::LpaeTran);
834 }
835 }
836 }
837
838 if (te->nonCacheable) {
839 // Prevent prefetching from I/O devices.
840 if (req->isPrefetch()) {
841 // Here we can safely use the fault status for the short
842 // desc. format in all cases
843 return std::make_shared<PrefetchAbort>(
844 vaddr_tainted,
845 ArmFault::PrefetchUncacheable,
846 isStage2, ArmFault::LpaeTran);
847 }
848 }
849
850 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
851 bool grant = false;
852
853 uint8_t xn = te->xn;
854 uint8_t pxn = te->pxn;
855 bool r = !is_write && !is_fetch;
856 bool w = is_write;
857 bool x = is_fetch;
858 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
859 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
860
861 if (isStage2) {
862 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
863 // In stage 2 we use the hypervisor access permission bits.
864 // The following permissions are described in ARM DDI 0487A.f
865 // D4-1802
866 uint8_t hap = 0x3 & te->hap;
867 if (is_fetch) {
868 // sctlr.wxn overrides the xn bit
869 grant = !sctlr.wxn && !xn;
870 } else if (is_write) {
871 grant = hap & 0x2;
872 } else { // is_read
873 grant = hap & 0x1;
874 }
875 } else {
876 switch (aarch64EL) {
877 case EL0:
878 {
879 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
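// perm packs {AP[2:1], XN, PXN}. Example: AP == 0b01 (RW at
// EL0 and EL1), XN == 0, PXN == 0 gives perm == 4, so EL0 is
// granted read/write, plus execute unless SCTLR.WXN is set.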
880 switch (perm) {
881 case 0:
882 case 1:
883 case 8:
884 case 9:
885 grant = x;
886 break;
887 case 4:
888 case 5:
889 grant = r || w || (x && !sctlr.wxn);
890 break;
891 case 6:
892 case 7:
893 grant = r || w;
894 break;
895 case 12:
896 case 13:
897 grant = r || x;
898 break;
899 case 14:
900 case 15:
901 grant = r;
902 break;
903 default:
904 grant = false;
905 }
906 }
907 break;
908 case EL1:
909 {
910 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
911 switch (perm) {
912 case 0:
913 case 2:
914 grant = r || w || (x && !sctlr.wxn);
915 break;
916 case 1:
917 case 3:
918 case 4:
919 case 5:
920 case 6:
921 case 7:
922 // regions that are writeable at EL0 should not be
923 // executable at EL1
924 grant = r || w;
925 break;
926 case 8:
927 case 10:
928 case 12:
929 case 14:
930 grant = r || x;
931 break;
932 case 9:
933 case 11:
934 case 13:
935 case 15:
936 grant = r;
937 break;
938 default:
939 grant = false;
940 }
941 }
942 break;
943 case EL2:
944 case EL3:
945 {
946 uint8_t perm = (ap & 0x2) | xn;
947 switch (perm) {
948 case 0:
949 grant = r || w || (x && !sctlr.wxn);
950 break;
951 case 1:
952 grant = r || w;
953 break;
954 case 2:
955 grant = r || x;
956 break;
957 case 3:
958 grant = r;
959 break;
960 default:
961 grant = false;
962 }
963 }
964 break;
965 }
966 }
967
968 if (!grant) {
969 if (is_fetch) {
970 permsFaults++;
971 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
972 "AP:%d priv:%d write:%d ns:%d sif:%d "
973 "sctlr.afe: %d\n",
974 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
975 // Use PC value instead of vaddr because vaddr might be aligned to
976 // cache line and should not be the address reported in FAR
977 return std::make_shared<PrefetchAbort>(
978 req->getPC(),
979 ArmFault::PermissionLL + te->lookupLevel,
980 isStage2, ArmFault::LpaeTran);
981 } else {
982 permsFaults++;
983 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
984 "priv:%d write:%d\n", ap, is_priv, is_write);
985 return std::make_shared<DataAbort>(
986 vaddr_tainted, te->domain, is_write,
987 ArmFault::PermissionLL + te->lookupLevel,
988 isStage2, ArmFault::LpaeTran);
989 }
990 }
991
992 return NoFault;
993}
994
995Fault
996TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
997 Translation *translation, bool &delay, bool timing,
998 TLB::ArmTranslationType tranType, bool functional)
999{
1000 // No such thing as a functional timing access
1001 assert(!(timing && functional));
1002
1003 updateMiscReg(tc, tranType);
1004
1005 Addr vaddr_tainted = req->getVaddr();
1006 Addr vaddr = 0;
1007 if (aarch64)
1008 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
1009 else
1010 vaddr = vaddr_tainted;
1011 Request::Flags flags = req->getFlags();
1012
1013 bool is_fetch = (mode == Execute);
1014 bool is_write = (mode == Write);
1015 bool long_desc_format = aarch64 || longDescFormatInUse(tc);
1016 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
1017 : ArmFault::VmsaTran;
1018
1019 req->setAsid(asid);
1020
1021 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1022 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1023
1024 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1025 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1026 scr, sctlr, flags, tranType);
1027
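// If the relevant cache is disabled (SCTLR.I for fetches, SCTLR.C for
// data) the access is downgraded to uncacheable, strictly ordered.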
1028 if ((req->isInstFetch() && (!sctlr.i)) ||
1029     ((!req->isInstFetch()) && (!sctlr.c))) {
1030 if (!req->isCacheMaintenance()) {
1031 req->setFlags(Request::UNCACHEABLE);
1032 }
1033 req->setFlags(Request::STRICT_ORDER);
1034 }
1035 if (!is_fetch) {
1036 assert(flags & MustBeOne);
1037 if (sctlr.a || !(flags & AllowUnaligned)) {
1038 if (vaddr & mask(flags & AlignmentMask)) {
1039 alignFaults++;
1040 return std::make_shared<DataAbort>(
1041 vaddr_tainted,
1042 TlbEntry::DomainType::NoAccess, is_write,
1043 ArmFault::AlignmentFault, isStage2,
1044 tranMethod);
1045 }
1046 }
1047 }
1048
1049 // MMU off (or stage 2 with hcr.vm == 0): the address is not translated
1050 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1051
1052 req->setPaddr(vaddr);
1053 // When the MMU is off the security attribute corresponds to the
1054 // security state of the processor
1055 if (isSecure)
1056 req->setFlags(Request::SECURE);
1057
1058 // @todo: double check this (ARM ARM issue C B3.2.1)
1059 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1060 nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1061 if (!req->isCacheMaintenance()) {
1062 req->setFlags(Request::UNCACHEABLE);
1063 }
1064 req->setFlags(Request::STRICT_ORDER);
1065 }
1066
1067 // Set memory attributes
1068 TlbEntry temp_te;
1069 temp_te.ns = !isSecure;
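// HCR.DC == 1 forces a default Normal memory type for non-secure
// EL1&0 accesses while stage 1 is disabled; every other MMU-off
// data access defaults to Strongly Ordered (fetches stay Normal).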
1070 if (isStage2 || hcr.dc == 0 || isSecure ||
1071 (isHyp && !(tranType & S1CTran))) {
1072
1073 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1074 : TlbEntry::MemoryType::StronglyOrdered;
1075 temp_te.innerAttrs = 0x0;
1076 temp_te.outerAttrs = 0x0;
1077 temp_te.shareable = true;
1078 temp_te.outerShareable = true;
1079 } else {
1080 temp_te.mtype = TlbEntry::MemoryType::Normal;
1081 temp_te.innerAttrs = 0x3;
1082 temp_te.outerAttrs = 0x3;
1083 temp_te.shareable = false;
1084 temp_te.outerShareable = false;
1085 }
1086 temp_te.setAttributes(long_desc_format);
1087 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1088 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1089 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1090 isStage2);
1091 setAttr(temp_te.attributes);
1092
1093 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
1094 }
1095
1096 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1097 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1098 // Translation enabled
1099
1100 TlbEntry *te = NULL;
1101 TlbEntry mergeTe;
1102 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1103 functional, &mergeTe);
1104 // only proceed if we have a valid table entry
1105 if ((te == NULL) && (fault == NoFault)) delay = true;
1106
1107 // If we have the table entry transfer some of the attributes to the
1108 // request that triggered the translation
1109 if (te != NULL) {
1110 // Set memory attributes
1111 DPRINTF(TLBVerbose,
1112 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1113 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1114 te->shareable, te->innerAttrs, te->outerAttrs,
1115 static_cast<uint8_t>(te->mtype), isStage2);
1116 setAttr(te->attributes);
1117
1118 if (te->nonCacheable && !req->isCacheMaintenance())
1119 req->setFlags(Request::UNCACHEABLE);
1120
1121 // Require requests to be ordered if the request goes to
1122 // strongly ordered or device memory (i.e., anything other
1123 // than normal memory requires strict order).
1124 if (te->mtype != TlbEntry::MemoryType::Normal)
1125 req->setFlags(Request::STRICT_ORDER);
1126
1127 Addr pa = te->pAddr(vaddr);
1128 req->setPaddr(pa);
1129
1130 if (isSecure && !te->ns) {
1131 req->setFlags(Request::SECURE);
1132 }
1133 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1134 (te->mtype != TlbEntry::MemoryType::Normal)) {
1135 // Unaligned accesses to Device memory should always cause an
1136 // abort regardless of sctlr.a
1137 alignFaults++;
1138 return std::make_shared<DataAbort>(
1139 vaddr_tainted,
1140 TlbEntry::DomainType::NoAccess, is_write,
1141 ArmFault::AlignmentFault, isStage2,
1142 tranMethod);
1143 }
1144
1145 // Check for a trickbox generated address fault
1146 if (fault == NoFault)
1147 fault = testTranslation(req, mode, te->domain);
1148 }
1149
1150 if (fault == NoFault) {
1151 // Don't try to finalize a physical address unless the
1152 // translation has completed (i.e., there is a table entry).
1153 return te ? finalizePhysical(req, tc, mode) : NoFault;
1154 } else {
1155 return fault;
1156 }
1157}
1158
1159Fault
1160TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
1161 TLB::ArmTranslationType tranType)
1162{
1163 updateMiscReg(tc, tranType);
1164
1165 if (directToStage2) {
1166 assert(stage2Tlb);
1167 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1168 }
1169
1170 bool delay = false;
1171 Fault fault;
1172 if (FullSystem)
1173 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1174 else
1175 fault = translateSe(req, tc, mode, NULL, delay, false);
1176 assert(!delay);
1177 return fault;
1178}
1179
1180Fault
1181TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
1182 TLB::ArmTranslationType tranType)
1183{
1184 updateMiscReg(tc, tranType);
1185
1186 if (directToStage2) {
1187 assert(stage2Tlb);
1188 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1189 }
1190
1191 bool delay = false;
1192 Fault fault;
1193 if (FullSystem)
1194 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1195 else
1196 fault = translateSe(req, tc, mode, NULL, delay, false);
1197 assert(!delay);
1198 return fault;
1199}
1200
1201void
1202TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
1203 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1204{
1205 updateMiscReg(tc, tranType);
1206
1207 if (directToStage2) {
1208 assert(stage2Tlb);
1209 stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1210 return;
1211 }
1212
1213 assert(translation);
1214
1215 translateComplete(req, tc, translation, mode, tranType, isStage2);
1216}
1217
1218Fault
1219TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
1220 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1221 bool callFromS2)
1222{
1223 bool delay = false;
1224 Fault fault;
1225 if (FullSystem)
1226 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1227 else
1228 fault = translateSe(req, tc, mode, translation, delay, true);
1229 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n",
1230         delay, fault != NoFault);
1231 // If we have a translation, and we're not in the middle of doing a stage
1232 // 2 translation, tell the translation that we've either finished or it's
1233 // going to take a while. By not doing this when we're in the middle of a
1234 // stage 2 translation we prevent marking the translation as delayed twice:
1235 // once when the translation starts and again when the stage 1 translation
1236 // completes.
1237 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1238 if (!delay)
1239 translation->finish(fault, req, tc, mode);
1240 else
1241 translation->markDelayed();
1242 }
1243 return fault;
1244}
1245
1246BaseMasterPort*
1247TLB::getMasterPort()
1248{
1249 return &stage2Mmu->getPort();
1250}
1251
1252void
1253TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1254{
1255 // check if the regs have changed, or the translation mode is different.
1256 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1257 // one type of translation anyway
1258 if (miscRegValid && miscRegContext == tc->contextId() &&
1259 ((tranType == curTranType) || isStage2)) {
1260 return;
1261 }
1262
1263 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1264 cpsr = tc->readMiscReg(MISCREG_CPSR);
1265
1266 // Dependencies: SCR/SCR_EL3, CPSR
1267 isSecure = inSecureState(tc) &&
1268 !(tranType & HypMode) && !(tranType & S1S2NsTran);
1269
1270 aarch64EL = tranTypeEL(cpsr, tranType);
1271 aarch64 = isStage2 ?
1272 ELIs64(tc, EL2) :
1273 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
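// Stage 2 translations are controlled from EL2, and EL0 shares its
// translation regime with EL1, hence the register-width checks above.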
1274
1275 if (aarch64) { // AArch64
1276 // determine EL we need to translate in
1277 switch (aarch64EL) {
1278 case EL0:
1279 case EL1:
1280 {
1281 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1282 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1283 uint64_t ttbr_asid = ttbcr.a1 ?
1284 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1285 tc->readMiscReg(MISCREG_TTBR0_EL1);
1286 asid = bits(ttbr_asid,
1287 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
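// The ASID sits at the top of the active TTBR: bits [63:48] when
// 16-bit ASIDs are in use (TCR.AS set and the large-ASID feature
// present), bits [55:48] otherwise.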
1288 }
1289 break;
1290 case EL2:
1291 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1292 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1293 asid = -1;
1294 break;
1295 case EL3:
1296 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1297 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1298 asid = -1;
1299 break;
1300 }
1301 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1302 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1303 isPriv = aarch64EL != EL0;
1304 if (haveVirtualization) {
1305 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1306 isHyp = tranType & HypMode;
1307 isHyp &= (tranType & S1S2NsTran) == 0;
1308 isHyp &= (tranType & S1CTran) == 0;
1309 // Work out if we should skip the first stage of translation and go
1310 // directly to stage 2. This value is cached so we don't have to
1311 // compute it for every translation.
1312 stage2Req = isStage2 ||
1313 (hcr.vm && !isHyp && !isSecure &&
1314 !(tranType & S1CTran) && (aarch64EL < EL2) &&
1315 !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1316 stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
1317 (aarch64EL < EL2));
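// stage2Req: this translation's output must itself be translated
// by stage 2. stage2DescReq: the descriptor loads made during the
// stage 1 walk must be translated by stage 2 (it is handed to
// tableWalker->walk() below), which can hold even when the final
// address bypasses stage 2.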
1318 directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1319 } else {
1320 vmid = 0;
1321 isHyp = false;
1322 directToStage2 = false;
1323 stage2Req = false;
1324 stage2DescReq = false;
1325 }
1326 } else { // AArch32
1327 sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
1328 !isSecure));
1329 ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
1330 !isSecure));
1331 scr = tc->readMiscReg(MISCREG_SCR);
1332 isPriv = cpsr.mode != MODE_USER;
1333 if (longDescFormatInUse(tc)) {
1334 uint64_t ttbr_asid = tc->readMiscReg(
1335 snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
1336 MISCREG_TTBR0,
1337 tc, !isSecure));
1338 asid = bits(ttbr_asid, 55, 48);
1339 } else { // Short-descriptor translation table format in use
1340 CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1341 MISCREG_CONTEXTIDR, tc, !isSecure));
1342 asid = context_id.asid;
1343 }
1344 prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
1345 !isSecure));
1346 nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
1347 !isSecure));
1348 dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
1349 !isSecure));
1350 hcr = tc->readMiscReg(MISCREG_HCR);
1351
1352 if (haveVirtualization) {
1353 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1354 isHyp = cpsr.mode == MODE_HYP;
1355 isHyp |= tranType & HypMode;
1356 isHyp &= (tranType & S1S2NsTran) == 0;
1357 isHyp &= (tranType & S1CTran) == 0;
1358 if (isHyp) {
1359 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1360 }
1361 // Work out if we should skip the first stage of translation and go
1362 // directly to stage 2. This value is cached so we don't have to
1363 // compute it for every translation.
1364 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1365 !(tranType & S1CTran);
1366 stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure;
1367 directToStage2 = stage2Req && !sctlr.m;
1368 } else {
1369 vmid = 0;
1370 stage2Req = false;
1371 isHyp = false;
1372 directToStage2 = false;
1373 stage2DescReq = false;
1374 }
1375 }
1376 miscRegValid = true;
1377 miscRegContext = tc->contextId();
1378 curTranType = tranType;
1379}
1380
1381ExceptionLevel
1382TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
1383{
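// The explicit S1E*/S12E* translation types name their target EL;
// the remaining types translate for the EL implied by the current
// CPSR mode.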
1384 switch (type) {
1385 case S1E0Tran:
1386 case S12E0Tran:
1387 return EL0;
1388
1389 case S1E1Tran:
1390 case S12E1Tran:
1391 return EL1;
1392
1393 case S1E2Tran:
1394 return EL2;
1395
1396 case S1E3Tran:
1397 return EL3;
1398
1399 case NormalTran:
1400 case S1CTran:
1401 case S1S2NsTran:
1402 case HypMode:
1403 return opModeToEL((OperatingMode)(uint8_t)cpsr.mode);
1404
1405 default:
1406 panic("Unknown translation mode!\n");
1407 }
1408}
1409
1410Fault
1411TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
1412 Translation *translation, bool timing, bool functional,
1413 bool is_secure, TLB::ArmTranslationType tranType)
1414{
1415 bool is_fetch = (mode == Execute);
1416 bool is_write = (mode == Write);
1417
1418 Addr vaddr_tainted = req->getVaddr();
1419 Addr vaddr = 0;
1420 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1421 if (aarch64) {
1422 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1423 } else {
1424 vaddr = vaddr_tainted;
1425 }
1426 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1427 if (*te == NULL) {
1428 if (req->isPrefetch()) {
1429 // if the request is a prefetch, don't attempt to fill the TLB or go
1430 // any further with the memory access (here we can safely use the
1431 // fault status for the short desc. format in all cases)
1432 prefetchFaults++;
1433 return std::make_shared<PrefetchAbort>(
1434 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1435 }
1436
1437 if (is_fetch)
1438 instMisses++;
1439 else if (is_write)
1440 writeMisses++;
1441 else
1442 readMisses++;
1443
1444 // start translation table walk, pass variables rather than
1445 // re-retrieving in table walker for speed
1446 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1447 vaddr_tainted, asid, vmid);
1448 Fault fault;
1449 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1450 translation, timing, functional, is_secure,
1451 tranType, stage2DescReq);
1452 // for timing mode, return and wait for table walk,
1453 if (timing || fault != NoFault) {
1454 return fault;
1455 }
1456
1457 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1458 if (!*te)
1459 printTlb();
1460 assert(*te);
1461 } else {
1462 if (is_fetch)
1463 instHits++;
1464 else if (is_write)
1465 writeHits++;
1466 else
1467 readHits++;
1468 }
1469 return NoFault;
1470}
1471
1472Fault
1473TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
1474 ThreadContext *tc, Mode mode,
1475 Translation *translation, bool timing, bool functional,
1476 TlbEntry *mergeTe)
1477{
1478 Fault fault;
1479
1480 if (isStage2) {
1481 // We are already in the stage 2 TLB. Grab the table entry for stage
1482 // 2 only. We are here because stage 1 translation is disabled.
1483 TlbEntry *s2Te = NULL;
1484 // Get the stage 2 table entry
1485 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1486 isSecure, curTranType);
1487 // Check permissions of stage 2
1488 if ((s2Te != NULL) && (fault == NoFault)) {
1489 if (aarch64)
1490 fault = checkPermissions64(s2Te, req, mode, tc);
1491 else
1492 fault = checkPermissions(s2Te, req, mode);
1493 }
1494 *te = s2Te;
1495 return fault;
1496 }
1497
1498 TlbEntry *s1Te = NULL;
1499
1500 Addr vaddr_tainted = req->getVaddr();
1501
1502 // Get the stage 1 table entry
1503 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1504 isSecure, curTranType);
1505 // only proceed if we have a valid table entry
1506 if ((s1Te != NULL) && (fault == NoFault)) {
1507 // Check stage 1 permissions before checking stage 2
1508 if (aarch64)
1509 fault = checkPermissions64(s1Te, req, mode, tc);
1510 else
1511 fault = checkPermissions(s1Te, req, mode);
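// A stage 1 hit may still need stage 2: Stage2LookUp pushes the
// IPA through the stage 2 TLB and merges the attributes and
// permissions of both stages into mergeTe.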
1512 if (stage2Req && (fault == NoFault)) {
1513 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1514 req, translation, mode, timing, functional, curTranType);
1515 fault = s2Lookup->getTe(tc, mergeTe);
1516 if (s2Lookup->isComplete()) {
1517 *te = mergeTe;
1518 // We've finished with the lookup so delete it
1519 delete s2Lookup;
1520 } else {
1521 // The lookup hasn't completed, so we can't delete it now. We
1522 // get round this by asking the object to self delete when the
1523 // translation is complete.
1524 s2Lookup->setSelfDelete();
1525 }
1526 } else {
1527 // This case deals with an S1 hit (or bypass), followed by
1528 // an S2 hit-but-perms issue
1529 if (isStage2) {
1530 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1531 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1532 if (fault != NoFault) {
1533 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1534 armFault->annotate(ArmFault::S1PTW, false);
1535 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1536 }
1537 }
1538 *te = s1Te;
1539 }
1540 }
1541 return fault;
1542}
1543
1544void
1545TLB::setTestInterface(SimObject *_ti)
1546{
1547 if (!_ti) {
1548 test = nullptr;
1549 } else {
1550 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1551 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1552 test = ti;
1553 }
1554}
1555
1556Fault
1557TLB::testTranslation(const RequestPtr &req, Mode mode,
1558 TlbEntry::DomainType domain)
1559{
1560 if (!test || !req->hasSize() || req->getSize() == 0 ||
1561 req->isCacheMaintenance()) {
1562 return NoFault;
1563 } else {
1564 return test->translationCheck(req, isPriv, mode, domain);
1565 }
1566}
1567
1568Fault
1569TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1570 TlbEntry::DomainType domain, LookupLevel lookup_level)
1571{
1572 if (!test) {
1573 return NoFault;
1574 } else {
1575 return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1576 domain, lookup_level);
1577 }
1578}
1579
1580
1581ArmISA::TLB *
1582ArmTLBParams::create()
1583{
1584 return new ArmISA::TLB(this);
1585}