table_walker.cc (14095:4f5d16d7cf45) table_walker.cc (14280:9e3f2937f72c)
1/*
2 * Copyright (c) 2010, 2012-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40#include "arch/arm/table_walker.hh"
41
42#include <memory>
43
44#include "arch/arm/faults.hh"
45#include "arch/arm/stage2_mmu.hh"
46#include "arch/arm/system.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "dev/dma_device.hh"
55#include "sim/system.hh"
56
57using namespace ArmISA;
58
59TableWalker::TableWalker(const Params *p)
60 : ClockedObject(p),
61 stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
62 isStage2(p->is_stage2), tlb(NULL),
63 currState(NULL), pending(false),
64 numSquashable(p->num_squash_per_cycle),
65 pendingReqs(0),
66 pendingChangeTick(curTick()),
67 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
68 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
69 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
70 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
71 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
72 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
73 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
74 &doL2LongDescEvent, &doL3LongDescEvent },
75 doProcessEvent([this]{ processWalkWrapper(); }, name())
76{
77 sctlr = 0;
78
79 // Cache system-level properties
80 if (FullSystem) {
81 ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
82 assert(armSys);
83 haveSecurity = armSys->haveSecurity();
84 _haveLPAE = armSys->haveLPAE();
85 _haveVirtualization = armSys->haveVirtualization();
86 physAddrRange = armSys->physAddrRange();
87 _haveLargeAsid64 = armSys->haveLargeAsid64();
88 } else {
89 haveSecurity = _haveLPAE = _haveVirtualization = false;
90 _haveLargeAsid64 = false;
91 physAddrRange = 32;
92 }
93
94}
95
96TableWalker::~TableWalker()
97{
98 ;
99}
100
101void
102TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
103{
104 stage2Mmu = m;
105 port = &m->getDMAPort();
106 masterId = master_id;
107}
108
109void
110TableWalker::init()
111{
112 fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
113 fatal_if(!port, "Table walker must have a valid port\n");
114 fatal_if(!tlb, "Table walker must have a valid TLB\n");
115}
116
117Port &
118TableWalker::getPort(const std::string &if_name, PortID idx)
119{
120 if (if_name == "port") {
121 if (!isStage2) {
122 return *port;
123 } else {
124 fatal("Cannot access table walker port through stage-two walker\n");
125 }
126 }
127 return ClockedObject::getPort(if_name, idx);
128}
129
130TableWalker::WalkerState::WalkerState() :
131 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
132 asid(0), vmid(0), isHyp(false), transState(nullptr),
133 vaddr(0), vaddr_tainted(0),
134 sctlr(0), scr(0), cpsr(0), tcr(0),
135 htcr(0), hcr(0), vtcr(0),
136 isWrite(false), isFetch(false), isSecure(false),
137 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
138 pxnTable(false), hpd(false), stage2Req(false),
139 stage2Tran(nullptr), timing(false), functional(false),
140 mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
141 delayed(false), tableWalker(nullptr)
142{
143}
144
145void
146TableWalker::completeDrain()
147{
148 if (drainState() == DrainState::Draining &&
149 stateQueues[L0].empty() && stateQueues[L1].empty() &&
150 stateQueues[L2].empty() && stateQueues[L3].empty() &&
151 pendingQueue.empty()) {
152
153 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
154 signalDrainDone();
155 }
156}
157
158DrainState
159TableWalker::drain()
160{
161 bool state_queues_not_empty = false;
162
163 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
164 if (!stateQueues[i].empty()) {
165 state_queues_not_empty = true;
166 break;
167 }
168 }
169
170 if (state_queues_not_empty || pendingQueue.size()) {
171 DPRINTF(Drain, "TableWalker not drained\n");
172 return DrainState::Draining;
173 } else {
174 DPRINTF(Drain, "TableWalker free, no need to drain\n");
175 return DrainState::Drained;
176 }
177}
178
179void
180TableWalker::drainResume()
181{
182 if (params()->sys->isTimingMode() && currState) {
183 delete currState;
184 currState = NULL;
185 pendingChange();
186 }
187}
188
189Fault
190TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
191 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
192 TLB::Translation *_trans, bool _timing, bool _functional,
193 bool secure, TLB::ArmTranslationType tranType,
194 bool _stage2Req)
195{
196 assert(!(_functional && _timing));
197 ++statWalks;
198
199 WalkerState *savedCurrState = NULL;
200
201 if (!currState && !_functional) {
202 // For atomic mode, a new WalkerState instance should be only created
203 // once per TLB. For timing mode, a new instance is generated for every
204 // TLB miss.
205 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
206
207 currState = new WalkerState();
208 currState->tableWalker = this;
209 } else if (_functional) {
210 // If we are mixing functional mode with timing (or even
211 // atomic), we need to to be careful and clean up after
212 // ourselves to not risk getting into an inconsistent state.
213 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
214 savedCurrState = currState;
215 currState = new WalkerState();
216 currState->tableWalker = this;
217 } else if (_timing) {
218 // This is a translation that was completed and then faulted again
219 // because some underlying parameters that affect the translation
220 // changed out from under us (e.g. asid). It will either be a
221 // misprediction, in which case nothing will happen or we'll use
222 // this fault to re-execute the faulting instruction which should clean
223 // up everything.
224 if (currState->vaddr_tainted == _req->getVaddr()) {
225 ++statSquashedBefore;
226 return std::make_shared<ReExec>();
227 }
228 }
229 pendingChange();
230
231 currState->startTime = curTick();
232 currState->tc = _tc;
233 // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
234 // aarch32/translation/translation/AArch32.TranslateAddress dictates
235 // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
236 if (isStage2) {
237 currState->el = EL1;
238 currState->aarch64 = ELIs64(_tc, EL2);
239 } else {
240 currState->el =
241 TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
242 currState->aarch64 =
243 ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
244 }
245 currState->transState = _trans;
246 currState->req = _req;
247 currState->fault = NoFault;
248 currState->asid = _asid;
249 currState->vmid = _vmid;
250 currState->isHyp = _isHyp;
251 currState->timing = _timing;
252 currState->functional = _functional;
253 currState->mode = _mode;
254 currState->tranType = tranType;
255 currState->isSecure = secure;
256 currState->physAddrRange = physAddrRange;
257
258 /** @todo These should be cached or grabbed from cached copies in
259 the TLB, all these miscreg reads are expensive */
260 currState->vaddr_tainted = currState->req->getVaddr();
261 if (currState->aarch64)
262 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
263 currState->tc, currState->el);
264 else
265 currState->vaddr = currState->vaddr_tainted;
266
267 if (currState->aarch64) {
268 if (isStage2) {
269 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
270 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
271 } else switch (currState->el) {
272 case EL0:
273 case EL1:
274 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
275 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
276 break;
277 case EL2:
278 assert(_haveVirtualization);
279 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
280 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
281 break;
282 case EL3:
283 assert(haveSecurity);
284 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
285 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
286 break;
287 default:
288 panic("Invalid exception level");
289 break;
290 }
291 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
292 } else {
293 currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
294 MISCREG_SCTLR, currState->tc, !currState->isSecure));
295 currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
296 MISCREG_TTBCR, currState->tc, !currState->isSecure));
297 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
298 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
299 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
300 }
301 sctlr = currState->sctlr;
302
303 currState->isFetch = (currState->mode == TLB::Execute);
304 currState->isWrite = (currState->mode == TLB::Write);
305
306 statRequestOrigin[REQUESTED][currState->isFetch]++;
307
308 currState->stage2Req = _stage2Req && !isStage2;
309
310 bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
311 longDescFormatInUse(currState->tc);
312
313 if (long_desc_format) {
314 // Helper variables used for hierarchical permissions
315 currState->secureLookup = currState->isSecure;
316 currState->rwTable = true;
317 currState->userTable = true;
318 currState->xnTable = false;
319 currState->pxnTable = false;
320
321 ++statWalksLongDescriptor;
322 } else {
323 ++statWalksShortDescriptor;
324 }
325
326 if (!currState->timing) {
327 Fault fault = NoFault;
328 if (currState->aarch64)
329 fault = processWalkAArch64();
330 else if (long_desc_format)
331 fault = processWalkLPAE();
332 else
333 fault = processWalk();
334
335 // If this was a functional non-timing access restore state to
336 // how we found it.
337 if (currState->functional) {
338 delete currState;
339 currState = savedCurrState;
340 }
341 return fault;
342 }
343
344 if (pending || pendingQueue.size()) {
345 pendingQueue.push_back(currState);
346 currState = NULL;
347 pendingChange();
348 } else {
349 pending = true;
350 pendingChange();
351 if (currState->aarch64)
352 return processWalkAArch64();
353 else if (long_desc_format)
354 return processWalkLPAE();
355 else
356 return processWalk();
357 }
358
359 return NoFault;
360}
361
362void
363TableWalker::processWalkWrapper()
364{
365 assert(!currState);
366 assert(pendingQueue.size());
367 pendingChange();
368 currState = pendingQueue.front();
369
370 // Check if a previous walk filled this request already
371 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
372 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
373 currState->vmid, currState->isHyp, currState->isSecure, true, false,
374 currState->el);
375
376 // Check if we still need to have a walk for this request. If the requesting
377 // instruction has been squashed, or a previous walk has filled the TLB with
378 // a match, we just want to get rid of the walk. The latter could happen
379 // when there are multiple outstanding misses to a single page and a
380 // previous request has been successfully translated.
381 if (!currState->transState->squashed() && !te) {
382 // We've got a valid request, lets process it
383 pending = true;
384 pendingQueue.pop_front();
385 // Keep currState in case one of the processWalk... calls NULLs it
386 WalkerState *curr_state_copy = currState;
387 Fault f;
388 if (currState->aarch64)
389 f = processWalkAArch64();
390 else if (longDescFormatInUse(currState->tc) ||
391 currState->isHyp || isStage2)
392 f = processWalkLPAE();
393 else
394 f = processWalk();
395
396 if (f != NoFault) {
397 curr_state_copy->transState->finish(f, curr_state_copy->req,
398 curr_state_copy->tc, curr_state_copy->mode);
399
400 delete curr_state_copy;
401 }
402 return;
403 }
404
405
406 // If the instruction that we were translating for has been
407 // squashed we shouldn't bother.
408 unsigned num_squashed = 0;
409 ThreadContext *tc = currState->tc;
410 while ((num_squashed < numSquashable) && currState &&
411 (currState->transState->squashed() || te)) {
412 pendingQueue.pop_front();
413 num_squashed++;
414 statSquashedBefore++;
415
416 DPRINTF(TLB, "Squashing table walk for address %#x\n",
417 currState->vaddr_tainted);
418
419 if (currState->transState->squashed()) {
420 // finish the translation which will delete the translation object
421 currState->transState->finish(
422 std::make_shared<UnimpFault>("Squashed Inst"),
423 currState->req, currState->tc, currState->mode);
424 } else {
425 // translate the request now that we know it will work
426 statWalkServiceTime.sample(curTick() - currState->startTime);
427 tlb->translateTiming(currState->req, currState->tc,
428 currState->transState, currState->mode);
429
430 }
431
432 // delete the current request
433 delete currState;
434
435 // peak at the next one
436 if (pendingQueue.size()) {
437 currState = pendingQueue.front();
438 te = tlb->lookup(currState->vaddr, currState->asid,
439 currState->vmid, currState->isHyp, currState->isSecure, true,
440 false, currState->el);
441 } else {
442 // Terminate the loop, nothing more to do
443 currState = NULL;
444 }
445 }
446 pendingChange();
447
448 // if we still have pending translations, schedule more work
449 nextWalk(tc);
450 currState = NULL;
451}
452
453Fault
454TableWalker::processWalk()
455{
456 Addr ttbr = 0;
457
458 // If translation isn't enabled, we shouldn't be here
459 assert(currState->sctlr.m || isStage2);
1/*
2 * Copyright (c) 2010, 2012-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40#include "arch/arm/table_walker.hh"
41
42#include <memory>
43
44#include "arch/arm/faults.hh"
45#include "arch/arm/stage2_mmu.hh"
46#include "arch/arm/system.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "dev/dma_device.hh"
55#include "sim/system.hh"
56
57using namespace ArmISA;
58
59TableWalker::TableWalker(const Params *p)
60 : ClockedObject(p),
61 stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
62 isStage2(p->is_stage2), tlb(NULL),
63 currState(NULL), pending(false),
64 numSquashable(p->num_squash_per_cycle),
65 pendingReqs(0),
66 pendingChangeTick(curTick()),
67 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
68 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
69 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
70 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
71 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
72 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
73 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
74 &doL2LongDescEvent, &doL3LongDescEvent },
75 doProcessEvent([this]{ processWalkWrapper(); }, name())
76{
77 sctlr = 0;
78
79 // Cache system-level properties
80 if (FullSystem) {
81 ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
82 assert(armSys);
83 haveSecurity = armSys->haveSecurity();
84 _haveLPAE = armSys->haveLPAE();
85 _haveVirtualization = armSys->haveVirtualization();
86 physAddrRange = armSys->physAddrRange();
87 _haveLargeAsid64 = armSys->haveLargeAsid64();
88 } else {
89 haveSecurity = _haveLPAE = _haveVirtualization = false;
90 _haveLargeAsid64 = false;
91 physAddrRange = 32;
92 }
93
94}
95
96TableWalker::~TableWalker()
97{
98 ;
99}
100
101void
102TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
103{
104 stage2Mmu = m;
105 port = &m->getDMAPort();
106 masterId = master_id;
107}
108
109void
110TableWalker::init()
111{
112 fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
113 fatal_if(!port, "Table walker must have a valid port\n");
114 fatal_if(!tlb, "Table walker must have a valid TLB\n");
115}
116
117Port &
118TableWalker::getPort(const std::string &if_name, PortID idx)
119{
120 if (if_name == "port") {
121 if (!isStage2) {
122 return *port;
123 } else {
124 fatal("Cannot access table walker port through stage-two walker\n");
125 }
126 }
127 return ClockedObject::getPort(if_name, idx);
128}
129
130TableWalker::WalkerState::WalkerState() :
131 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
132 asid(0), vmid(0), isHyp(false), transState(nullptr),
133 vaddr(0), vaddr_tainted(0),
134 sctlr(0), scr(0), cpsr(0), tcr(0),
135 htcr(0), hcr(0), vtcr(0),
136 isWrite(false), isFetch(false), isSecure(false),
137 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
138 pxnTable(false), hpd(false), stage2Req(false),
139 stage2Tran(nullptr), timing(false), functional(false),
140 mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
141 delayed(false), tableWalker(nullptr)
142{
143}
144
145void
146TableWalker::completeDrain()
147{
148 if (drainState() == DrainState::Draining &&
149 stateQueues[L0].empty() && stateQueues[L1].empty() &&
150 stateQueues[L2].empty() && stateQueues[L3].empty() &&
151 pendingQueue.empty()) {
152
153 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
154 signalDrainDone();
155 }
156}
157
158DrainState
159TableWalker::drain()
160{
161 bool state_queues_not_empty = false;
162
163 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
164 if (!stateQueues[i].empty()) {
165 state_queues_not_empty = true;
166 break;
167 }
168 }
169
170 if (state_queues_not_empty || pendingQueue.size()) {
171 DPRINTF(Drain, "TableWalker not drained\n");
172 return DrainState::Draining;
173 } else {
174 DPRINTF(Drain, "TableWalker free, no need to drain\n");
175 return DrainState::Drained;
176 }
177}
178
179void
180TableWalker::drainResume()
181{
182 if (params()->sys->isTimingMode() && currState) {
183 delete currState;
184 currState = NULL;
185 pendingChange();
186 }
187}
188
189Fault
190TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
191 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
192 TLB::Translation *_trans, bool _timing, bool _functional,
193 bool secure, TLB::ArmTranslationType tranType,
194 bool _stage2Req)
195{
196 assert(!(_functional && _timing));
197 ++statWalks;
198
199 WalkerState *savedCurrState = NULL;
200
201 if (!currState && !_functional) {
202 // For atomic mode, a new WalkerState instance should be only created
203 // once per TLB. For timing mode, a new instance is generated for every
204 // TLB miss.
205 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
206
207 currState = new WalkerState();
208 currState->tableWalker = this;
209 } else if (_functional) {
210 // If we are mixing functional mode with timing (or even
211 // atomic), we need to to be careful and clean up after
212 // ourselves to not risk getting into an inconsistent state.
213 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
214 savedCurrState = currState;
215 currState = new WalkerState();
216 currState->tableWalker = this;
217 } else if (_timing) {
218 // This is a translation that was completed and then faulted again
219 // because some underlying parameters that affect the translation
220 // changed out from under us (e.g. asid). It will either be a
221 // misprediction, in which case nothing will happen or we'll use
222 // this fault to re-execute the faulting instruction which should clean
223 // up everything.
224 if (currState->vaddr_tainted == _req->getVaddr()) {
225 ++statSquashedBefore;
226 return std::make_shared<ReExec>();
227 }
228 }
229 pendingChange();
230
231 currState->startTime = curTick();
232 currState->tc = _tc;
233 // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
234 // aarch32/translation/translation/AArch32.TranslateAddress dictates
235 // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
236 if (isStage2) {
237 currState->el = EL1;
238 currState->aarch64 = ELIs64(_tc, EL2);
239 } else {
240 currState->el =
241 TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
242 currState->aarch64 =
243 ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
244 }
245 currState->transState = _trans;
246 currState->req = _req;
247 currState->fault = NoFault;
248 currState->asid = _asid;
249 currState->vmid = _vmid;
250 currState->isHyp = _isHyp;
251 currState->timing = _timing;
252 currState->functional = _functional;
253 currState->mode = _mode;
254 currState->tranType = tranType;
255 currState->isSecure = secure;
256 currState->physAddrRange = physAddrRange;
257
258 /** @todo These should be cached or grabbed from cached copies in
259 the TLB, all these miscreg reads are expensive */
260 currState->vaddr_tainted = currState->req->getVaddr();
261 if (currState->aarch64)
262 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
263 currState->tc, currState->el);
264 else
265 currState->vaddr = currState->vaddr_tainted;
266
267 if (currState->aarch64) {
268 if (isStage2) {
269 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
270 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
271 } else switch (currState->el) {
272 case EL0:
273 case EL1:
274 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
275 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
276 break;
277 case EL2:
278 assert(_haveVirtualization);
279 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
280 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
281 break;
282 case EL3:
283 assert(haveSecurity);
284 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
285 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
286 break;
287 default:
288 panic("Invalid exception level");
289 break;
290 }
291 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
292 } else {
293 currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
294 MISCREG_SCTLR, currState->tc, !currState->isSecure));
295 currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
296 MISCREG_TTBCR, currState->tc, !currState->isSecure));
297 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
298 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
299 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
300 }
301 sctlr = currState->sctlr;
302
303 currState->isFetch = (currState->mode == TLB::Execute);
304 currState->isWrite = (currState->mode == TLB::Write);
305
306 statRequestOrigin[REQUESTED][currState->isFetch]++;
307
308 currState->stage2Req = _stage2Req && !isStage2;
309
310 bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
311 longDescFormatInUse(currState->tc);
312
313 if (long_desc_format) {
314 // Helper variables used for hierarchical permissions
315 currState->secureLookup = currState->isSecure;
316 currState->rwTable = true;
317 currState->userTable = true;
318 currState->xnTable = false;
319 currState->pxnTable = false;
320
321 ++statWalksLongDescriptor;
322 } else {
323 ++statWalksShortDescriptor;
324 }
325
326 if (!currState->timing) {
327 Fault fault = NoFault;
328 if (currState->aarch64)
329 fault = processWalkAArch64();
330 else if (long_desc_format)
331 fault = processWalkLPAE();
332 else
333 fault = processWalk();
334
335 // If this was a functional non-timing access restore state to
336 // how we found it.
337 if (currState->functional) {
338 delete currState;
339 currState = savedCurrState;
340 }
341 return fault;
342 }
343
344 if (pending || pendingQueue.size()) {
345 pendingQueue.push_back(currState);
346 currState = NULL;
347 pendingChange();
348 } else {
349 pending = true;
350 pendingChange();
351 if (currState->aarch64)
352 return processWalkAArch64();
353 else if (long_desc_format)
354 return processWalkLPAE();
355 else
356 return processWalk();
357 }
358
359 return NoFault;
360}
361
362void
363TableWalker::processWalkWrapper()
364{
365 assert(!currState);
366 assert(pendingQueue.size());
367 pendingChange();
368 currState = pendingQueue.front();
369
370 // Check if a previous walk filled this request already
371 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
372 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
373 currState->vmid, currState->isHyp, currState->isSecure, true, false,
374 currState->el);
375
376 // Check if we still need to have a walk for this request. If the requesting
377 // instruction has been squashed, or a previous walk has filled the TLB with
378 // a match, we just want to get rid of the walk. The latter could happen
379 // when there are multiple outstanding misses to a single page and a
380 // previous request has been successfully translated.
381 if (!currState->transState->squashed() && !te) {
382 // We've got a valid request, lets process it
383 pending = true;
384 pendingQueue.pop_front();
385 // Keep currState in case one of the processWalk... calls NULLs it
386 WalkerState *curr_state_copy = currState;
387 Fault f;
388 if (currState->aarch64)
389 f = processWalkAArch64();
390 else if (longDescFormatInUse(currState->tc) ||
391 currState->isHyp || isStage2)
392 f = processWalkLPAE();
393 else
394 f = processWalk();
395
396 if (f != NoFault) {
397 curr_state_copy->transState->finish(f, curr_state_copy->req,
398 curr_state_copy->tc, curr_state_copy->mode);
399
400 delete curr_state_copy;
401 }
402 return;
403 }
404
405
406 // If the instruction that we were translating for has been
407 // squashed we shouldn't bother.
408 unsigned num_squashed = 0;
409 ThreadContext *tc = currState->tc;
410 while ((num_squashed < numSquashable) && currState &&
411 (currState->transState->squashed() || te)) {
412 pendingQueue.pop_front();
413 num_squashed++;
414 statSquashedBefore++;
415
416 DPRINTF(TLB, "Squashing table walk for address %#x\n",
417 currState->vaddr_tainted);
418
419 if (currState->transState->squashed()) {
420 // finish the translation which will delete the translation object
421 currState->transState->finish(
422 std::make_shared<UnimpFault>("Squashed Inst"),
423 currState->req, currState->tc, currState->mode);
424 } else {
425 // translate the request now that we know it will work
426 statWalkServiceTime.sample(curTick() - currState->startTime);
427 tlb->translateTiming(currState->req, currState->tc,
428 currState->transState, currState->mode);
429
430 }
431
432 // delete the current request
433 delete currState;
434
435 // peak at the next one
436 if (pendingQueue.size()) {
437 currState = pendingQueue.front();
438 te = tlb->lookup(currState->vaddr, currState->asid,
439 currState->vmid, currState->isHyp, currState->isSecure, true,
440 false, currState->el);
441 } else {
442 // Terminate the loop, nothing more to do
443 currState = NULL;
444 }
445 }
446 pendingChange();
447
448 // if we still have pending translations, schedule more work
449 nextWalk(tc);
450 currState = NULL;
451}
452
453Fault
454TableWalker::processWalk()
455{
456 Addr ttbr = 0;
457
458 // If translation isn't enabled, we shouldn't be here
459 assert(currState->sctlr.m || isStage2);
460 const bool is_atomic = currState->req->isAtomic();
460
461 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
462 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
463 32 - currState->ttbcr.n));
464
465 statWalkWaitTime.sample(curTick() - currState->startTime);
466
467 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
468 32 - currState->ttbcr.n)) {
469 DPRINTF(TLB, " - Selecting TTBR0\n");
470 // Check if table walk is allowed when Security Extensions are enabled
471 if (haveSecurity && currState->ttbcr.pd0) {
472 if (currState->isFetch)
473 return std::make_shared<PrefetchAbort>(
474 currState->vaddr_tainted,
475 ArmFault::TranslationLL + L1,
476 isStage2,
477 ArmFault::VmsaTran);
478 else
479 return std::make_shared<DataAbort>(
480 currState->vaddr_tainted,
461
462 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
463 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
464 32 - currState->ttbcr.n));
465
466 statWalkWaitTime.sample(curTick() - currState->startTime);
467
468 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
469 32 - currState->ttbcr.n)) {
470 DPRINTF(TLB, " - Selecting TTBR0\n");
471 // Check if table walk is allowed when Security Extensions are enabled
472 if (haveSecurity && currState->ttbcr.pd0) {
473 if (currState->isFetch)
474 return std::make_shared<PrefetchAbort>(
475 currState->vaddr_tainted,
476 ArmFault::TranslationLL + L1,
477 isStage2,
478 ArmFault::VmsaTran);
479 else
480 return std::make_shared<DataAbort>(
481 currState->vaddr_tainted,
481 TlbEntry::DomainType::NoAccess, currState->isWrite,
482 TlbEntry::DomainType::NoAccess,
483 is_atomic ? false : currState->isWrite,
482 ArmFault::TranslationLL + L1, isStage2,
483 ArmFault::VmsaTran);
484 }
485 ttbr = currState->tc->readMiscReg(snsBankedIndex(
486 MISCREG_TTBR0, currState->tc, !currState->isSecure));
487 } else {
488 DPRINTF(TLB, " - Selecting TTBR1\n");
489 // Check if table walk is allowed when Security Extensions are enabled
490 if (haveSecurity && currState->ttbcr.pd1) {
491 if (currState->isFetch)
492 return std::make_shared<PrefetchAbort>(
493 currState->vaddr_tainted,
494 ArmFault::TranslationLL + L1,
495 isStage2,
496 ArmFault::VmsaTran);
497 else
498 return std::make_shared<DataAbort>(
499 currState->vaddr_tainted,
484 ArmFault::TranslationLL + L1, isStage2,
485 ArmFault::VmsaTran);
486 }
487 ttbr = currState->tc->readMiscReg(snsBankedIndex(
488 MISCREG_TTBR0, currState->tc, !currState->isSecure));
489 } else {
490 DPRINTF(TLB, " - Selecting TTBR1\n");
491 // Check if table walk is allowed when Security Extensions are enabled
492 if (haveSecurity && currState->ttbcr.pd1) {
493 if (currState->isFetch)
494 return std::make_shared<PrefetchAbort>(
495 currState->vaddr_tainted,
496 ArmFault::TranslationLL + L1,
497 isStage2,
498 ArmFault::VmsaTran);
499 else
500 return std::make_shared<DataAbort>(
501 currState->vaddr_tainted,
500 TlbEntry::DomainType::NoAccess, currState->isWrite,
502 TlbEntry::DomainType::NoAccess,
503 is_atomic ? false : currState->isWrite,
501 ArmFault::TranslationLL + L1, isStage2,
502 ArmFault::VmsaTran);
503 }
504 ttbr = currState->tc->readMiscReg(snsBankedIndex(
505 MISCREG_TTBR1, currState->tc, !currState->isSecure));
506 currState->ttbcr.n = 0;
507 }
508
509 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
510 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
511 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
512 currState->isSecure ? "s" : "ns");
513
514 // Trickbox address check
515 Fault f;
516 f = testWalk(l1desc_addr, sizeof(uint32_t),
517 TlbEntry::DomainType::NoAccess, L1);
518 if (f) {
519 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
520 if (currState->timing) {
521 pending = false;
522 nextWalk(currState->tc);
523 currState = NULL;
524 } else {
525 currState->tc = NULL;
526 currState->req = NULL;
527 }
528 return f;
529 }
530
531 Request::Flags flag = Request::PT_WALK;
532 if (currState->sctlr.c == 0) {
533 flag.set(Request::UNCACHEABLE);
534 }
535
536 if (currState->isSecure) {
537 flag.set(Request::SECURE);
538 }
539
540 bool delayed;
541 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
542 sizeof(uint32_t), flag, L1, &doL1DescEvent,
543 &TableWalker::doL1Descriptor);
544 if (!delayed) {
545 f = currState->fault;
546 }
547
548 return f;
549}
550
551Fault
552TableWalker::processWalkLPAE()
553{
554 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
555 int tsz, n;
556 LookupLevel start_lookup_level = L1;
557
558 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
559 currState->vaddr_tainted, currState->ttbcr);
560
561 statWalkWaitTime.sample(curTick() - currState->startTime);
562
563 Request::Flags flag = Request::PT_WALK;
564 if (currState->isSecure)
565 flag.set(Request::SECURE);
566
567 // work out which base address register to use, if in hyp mode we always
568 // use HTTBR
569 if (isStage2) {
570 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
571 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
572 tsz = sext<4>(currState->vtcr.t0sz);
573 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
574 } else if (currState->isHyp) {
575 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
576 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
577 tsz = currState->htcr.t0sz;
578 } else {
579 assert(longDescFormatInUse(currState->tc));
580
581 // Determine boundaries of TTBR0/1 regions
582 if (currState->ttbcr.t0sz)
583 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
584 else if (currState->ttbcr.t1sz)
585 ttbr0_max = (1ULL << 32) -
586 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
587 else
588 ttbr0_max = (1ULL << 32) - 1;
589 if (currState->ttbcr.t1sz)
590 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
591 else
592 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
593
504 ArmFault::TranslationLL + L1, isStage2,
505 ArmFault::VmsaTran);
506 }
507 ttbr = currState->tc->readMiscReg(snsBankedIndex(
508 MISCREG_TTBR1, currState->tc, !currState->isSecure));
509 currState->ttbcr.n = 0;
510 }
511
512 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
513 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
514 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
515 currState->isSecure ? "s" : "ns");
516
517 // Trickbox address check
518 Fault f;
519 f = testWalk(l1desc_addr, sizeof(uint32_t),
520 TlbEntry::DomainType::NoAccess, L1);
521 if (f) {
522 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
523 if (currState->timing) {
524 pending = false;
525 nextWalk(currState->tc);
526 currState = NULL;
527 } else {
528 currState->tc = NULL;
529 currState->req = NULL;
530 }
531 return f;
532 }
533
534 Request::Flags flag = Request::PT_WALK;
535 if (currState->sctlr.c == 0) {
536 flag.set(Request::UNCACHEABLE);
537 }
538
539 if (currState->isSecure) {
540 flag.set(Request::SECURE);
541 }
542
543 bool delayed;
544 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
545 sizeof(uint32_t), flag, L1, &doL1DescEvent,
546 &TableWalker::doL1Descriptor);
547 if (!delayed) {
548 f = currState->fault;
549 }
550
551 return f;
552}
553
554Fault
555TableWalker::processWalkLPAE()
556{
557 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
558 int tsz, n;
559 LookupLevel start_lookup_level = L1;
560
561 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
562 currState->vaddr_tainted, currState->ttbcr);
563
564 statWalkWaitTime.sample(curTick() - currState->startTime);
565
566 Request::Flags flag = Request::PT_WALK;
567 if (currState->isSecure)
568 flag.set(Request::SECURE);
569
570 // work out which base address register to use, if in hyp mode we always
571 // use HTTBR
572 if (isStage2) {
573 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
574 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
575 tsz = sext<4>(currState->vtcr.t0sz);
576 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
577 } else if (currState->isHyp) {
578 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
579 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
580 tsz = currState->htcr.t0sz;
581 } else {
582 assert(longDescFormatInUse(currState->tc));
583
584 // Determine boundaries of TTBR0/1 regions
585 if (currState->ttbcr.t0sz)
586 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
587 else if (currState->ttbcr.t1sz)
588 ttbr0_max = (1ULL << 32) -
589 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
590 else
591 ttbr0_max = (1ULL << 32) - 1;
592 if (currState->ttbcr.t1sz)
593 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
594 else
595 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
596
597 const bool is_atomic = currState->req->isAtomic();
598
594 // The following code snippet selects the appropriate translation table base
595 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
596 // depending on the address range supported by the translation table (ARM
597 // ARM issue C B3.6.4)
598 if (currState->vaddr <= ttbr0_max) {
599 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
600 // Check if table walk is allowed
601 if (currState->ttbcr.epd0) {
602 if (currState->isFetch)
603 return std::make_shared<PrefetchAbort>(
604 currState->vaddr_tainted,
605 ArmFault::TranslationLL + L1,
606 isStage2,
607 ArmFault::LpaeTran);
608 else
609 return std::make_shared<DataAbort>(
610 currState->vaddr_tainted,
611 TlbEntry::DomainType::NoAccess,
599 // The following code snippet selects the appropriate translation table base
600 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
601 // depending on the address range supported by the translation table (ARM
602 // ARM issue C B3.6.4)
603 if (currState->vaddr <= ttbr0_max) {
604 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
605 // Check if table walk is allowed
606 if (currState->ttbcr.epd0) {
607 if (currState->isFetch)
608 return std::make_shared<PrefetchAbort>(
609 currState->vaddr_tainted,
610 ArmFault::TranslationLL + L1,
611 isStage2,
612 ArmFault::LpaeTran);
613 else
614 return std::make_shared<DataAbort>(
615 currState->vaddr_tainted,
616 TlbEntry::DomainType::NoAccess,
612 currState->isWrite,
617 is_atomic ? false : currState->isWrite,
613 ArmFault::TranslationLL + L1,
614 isStage2,
615 ArmFault::LpaeTran);
616 }
617 ttbr = currState->tc->readMiscReg(snsBankedIndex(
618 MISCREG_TTBR0, currState->tc, !currState->isSecure));
619 tsz = currState->ttbcr.t0sz;
620 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
621 start_lookup_level = L2;
622 } else if (currState->vaddr >= ttbr1_min) {
623 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
624 // Check if table walk is allowed
625 if (currState->ttbcr.epd1) {
626 if (currState->isFetch)
627 return std::make_shared<PrefetchAbort>(
628 currState->vaddr_tainted,
629 ArmFault::TranslationLL + L1,
630 isStage2,
631 ArmFault::LpaeTran);
632 else
633 return std::make_shared<DataAbort>(
634 currState->vaddr_tainted,
635 TlbEntry::DomainType::NoAccess,
618 ArmFault::TranslationLL + L1,
619 isStage2,
620 ArmFault::LpaeTran);
621 }
622 ttbr = currState->tc->readMiscReg(snsBankedIndex(
623 MISCREG_TTBR0, currState->tc, !currState->isSecure));
624 tsz = currState->ttbcr.t0sz;
625 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
626 start_lookup_level = L2;
627 } else if (currState->vaddr >= ttbr1_min) {
628 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
629 // Check if table walk is allowed
630 if (currState->ttbcr.epd1) {
631 if (currState->isFetch)
632 return std::make_shared<PrefetchAbort>(
633 currState->vaddr_tainted,
634 ArmFault::TranslationLL + L1,
635 isStage2,
636 ArmFault::LpaeTran);
637 else
638 return std::make_shared<DataAbort>(
639 currState->vaddr_tainted,
640 TlbEntry::DomainType::NoAccess,
636 currState->isWrite,
641 is_atomic ? false : currState->isWrite,
637 ArmFault::TranslationLL + L1,
638 isStage2,
639 ArmFault::LpaeTran);
640 }
641 ttbr = currState->tc->readMiscReg(snsBankedIndex(
642 MISCREG_TTBR1, currState->tc, !currState->isSecure));
643 tsz = currState->ttbcr.t1sz;
644 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
645 start_lookup_level = L2;
646 } else {
647 // Out of boundaries -> translation fault
648 if (currState->isFetch)
649 return std::make_shared<PrefetchAbort>(
650 currState->vaddr_tainted,
651 ArmFault::TranslationLL + L1,
652 isStage2,
653 ArmFault::LpaeTran);
654 else
655 return std::make_shared<DataAbort>(
656 currState->vaddr_tainted,
657 TlbEntry::DomainType::NoAccess,
642 ArmFault::TranslationLL + L1,
643 isStage2,
644 ArmFault::LpaeTran);
645 }
646 ttbr = currState->tc->readMiscReg(snsBankedIndex(
647 MISCREG_TTBR1, currState->tc, !currState->isSecure));
648 tsz = currState->ttbcr.t1sz;
649 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
650 start_lookup_level = L2;
651 } else {
652 // Out of boundaries -> translation fault
653 if (currState->isFetch)
654 return std::make_shared<PrefetchAbort>(
655 currState->vaddr_tainted,
656 ArmFault::TranslationLL + L1,
657 isStage2,
658 ArmFault::LpaeTran);
659 else
660 return std::make_shared<DataAbort>(
661 currState->vaddr_tainted,
662 TlbEntry::DomainType::NoAccess,
658 currState->isWrite, ArmFault::TranslationLL + L1,
663 is_atomic ? false : currState->isWrite,
664 ArmFault::TranslationLL + L1,
659 isStage2, ArmFault::LpaeTran);
660 }
661
662 }
663
664 // Perform lookup (ARM ARM issue C B3.6.6)
665 if (start_lookup_level == L1) {
666 n = 5 - tsz;
667 desc_addr = mbits(ttbr, 39, n) |
668 (bits(currState->vaddr, n + 26, 30) << 3);
669 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
670 desc_addr, currState->isSecure ? "s" : "ns");
671 } else {
672 // Skip first-level lookup
673 n = (tsz >= 2 ? 14 - tsz : 12);
674 desc_addr = mbits(ttbr, 39, n) |
675 (bits(currState->vaddr, n + 17, 21) << 3);
676 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
677 desc_addr, currState->isSecure ? "s" : "ns");
678 }
679
680 // Trickbox address check
681 Fault f = testWalk(desc_addr, sizeof(uint64_t),
682 TlbEntry::DomainType::NoAccess, start_lookup_level);
683 if (f) {
684 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
685 if (currState->timing) {
686 pending = false;
687 nextWalk(currState->tc);
688 currState = NULL;
689 } else {
690 currState->tc = NULL;
691 currState->req = NULL;
692 }
693 return f;
694 }
695
696 if (currState->sctlr.c == 0) {
697 flag.set(Request::UNCACHEABLE);
698 }
699
700 currState->longDesc.lookupLevel = start_lookup_level;
701 currState->longDesc.aarch64 = false;
702 currState->longDesc.grainSize = Grain4KB;
703
704 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
705 sizeof(uint64_t), flag, start_lookup_level,
706 LongDescEventByLevel[start_lookup_level],
707 &TableWalker::doLongDescriptor);
708 if (!delayed) {
709 f = currState->fault;
710 }
711
712 return f;
713}
714
715unsigned
716TableWalker::adjustTableSizeAArch64(unsigned tsz)
717{
718 if (tsz < 25)
719 return 25;
720 if (tsz > 48)
721 return 48;
722 return tsz;
723}
724
725bool
726TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
727{
728 return (currPhysAddrRange != MaxPhysAddrRange &&
729 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
730}
731
732Fault
733TableWalker::processWalkAArch64()
734{
735 assert(currState->aarch64);
736
737 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
738 currState->vaddr_tainted, currState->tcr);
739
740 static const GrainSize GrainMap_tg0[] =
741 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
742 static const GrainSize GrainMap_tg1[] =
743 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
744
745 statWalkWaitTime.sample(curTick() - currState->startTime);
746
747 // Determine TTBR, table size, granule size and phys. address range
748 Addr ttbr = 0;
749 int tsz = 0, ps = 0;
750 GrainSize tg = Grain4KB; // grain size computed from tg* field
751 bool fault = false;
752
753 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
754
755 switch (currState->el) {
756 case EL0:
757 case EL1:
758 if (isStage2) {
759 DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
760 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
761 tsz = 64 - currState->vtcr.t0sz64;
762 tg = GrainMap_tg0[currState->vtcr.tg0];
763 // ARM DDI 0487A.f D7-2148
764 // The starting level of stage 2 translation depends on
765 // VTCR_EL2.SL0 and VTCR_EL2.TG0
766 LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
767 uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
768 static const LookupLevel SLL[] = {
769 L2, L3, L3, __, // sl0 == 0
770 L1, L2, L2, __, // sl0 == 1, etc.
771 L0, L1, L1, __,
772 __, __, __, __
773 };
774 start_lookup_level = SLL[sl_tg];
775 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
776 "Cannot discern lookup level from vtcr.{sl0,tg0}");
777 ps = currState->vtcr.ps;
778 } else {
779 switch (bits(currState->vaddr, 63,48)) {
780 case 0:
781 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
782 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
783 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
784 tg = GrainMap_tg0[currState->tcr.tg0];
785 currState->hpd = currState->tcr.hpd0;
786 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
787 currState->tcr.epd0)
788 fault = true;
789 break;
790 case 0xffff:
791 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
792 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
793 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
794 tg = GrainMap_tg1[currState->tcr.tg1];
795 currState->hpd = currState->tcr.hpd1;
796 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
797 currState->tcr.epd1)
798 fault = true;
799 break;
800 default:
801 // top two bytes must be all 0s or all 1s, else invalid addr
802 fault = true;
803 }
804 ps = currState->tcr.ips;
805 }
806 break;
807 case EL2:
808 switch(bits(currState->vaddr, 63,48)) {
809 case 0:
810 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
811 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
812 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
813 tg = GrainMap_tg0[currState->tcr.tg0];
814 currState->hpd = currState->hcr.e2h ?
815 currState->tcr.hpd0 : currState->tcr.hpd;
816 break;
817
818 case 0xffff:
819 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
820 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
821 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
822 tg = GrainMap_tg1[currState->tcr.tg1];
823 currState->hpd = currState->tcr.hpd1;
824 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
825 currState->tcr.epd1 || !currState->hcr.e2h)
826 fault = true;
827 break;
828
829 default:
830 // invalid addr if top two bytes are not all 0s
831 fault = true;
832 }
833 ps = currState->tcr.ps;
834 break;
835 case EL3:
836 switch(bits(currState->vaddr, 63,48)) {
837 case 0:
838 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
839 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
840 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
841 tg = GrainMap_tg0[currState->tcr.tg0];
842 currState->hpd = currState->tcr.hpd;
843 break;
844 default:
845 // invalid addr if top two bytes are not all 0s
846 fault = true;
847 }
848 ps = currState->tcr.ps;
849 break;
850 }
851
665 isStage2, ArmFault::LpaeTran);
666 }
667
668 }
669
670 // Perform lookup (ARM ARM issue C B3.6.6)
671 if (start_lookup_level == L1) {
672 n = 5 - tsz;
673 desc_addr = mbits(ttbr, 39, n) |
674 (bits(currState->vaddr, n + 26, 30) << 3);
675 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
676 desc_addr, currState->isSecure ? "s" : "ns");
677 } else {
678 // Skip first-level lookup
679 n = (tsz >= 2 ? 14 - tsz : 12);
680 desc_addr = mbits(ttbr, 39, n) |
681 (bits(currState->vaddr, n + 17, 21) << 3);
682 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
683 desc_addr, currState->isSecure ? "s" : "ns");
684 }
685
686 // Trickbox address check
687 Fault f = testWalk(desc_addr, sizeof(uint64_t),
688 TlbEntry::DomainType::NoAccess, start_lookup_level);
689 if (f) {
690 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
691 if (currState->timing) {
692 pending = false;
693 nextWalk(currState->tc);
694 currState = NULL;
695 } else {
696 currState->tc = NULL;
697 currState->req = NULL;
698 }
699 return f;
700 }
701
702 if (currState->sctlr.c == 0) {
703 flag.set(Request::UNCACHEABLE);
704 }
705
706 currState->longDesc.lookupLevel = start_lookup_level;
707 currState->longDesc.aarch64 = false;
708 currState->longDesc.grainSize = Grain4KB;
709
710 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
711 sizeof(uint64_t), flag, start_lookup_level,
712 LongDescEventByLevel[start_lookup_level],
713 &TableWalker::doLongDescriptor);
714 if (!delayed) {
715 f = currState->fault;
716 }
717
718 return f;
719}
720
721unsigned
722TableWalker::adjustTableSizeAArch64(unsigned tsz)
723{
724 if (tsz < 25)
725 return 25;
726 if (tsz > 48)
727 return 48;
728 return tsz;
729}
730
731bool
732TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
733{
734 return (currPhysAddrRange != MaxPhysAddrRange &&
735 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
736}
737
738Fault
739TableWalker::processWalkAArch64()
740{
741 assert(currState->aarch64);
742
743 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
744 currState->vaddr_tainted, currState->tcr);
745
746 static const GrainSize GrainMap_tg0[] =
747 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
748 static const GrainSize GrainMap_tg1[] =
749 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
750
751 statWalkWaitTime.sample(curTick() - currState->startTime);
752
753 // Determine TTBR, table size, granule size and phys. address range
754 Addr ttbr = 0;
755 int tsz = 0, ps = 0;
756 GrainSize tg = Grain4KB; // grain size computed from tg* field
757 bool fault = false;
758
759 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
760
761 switch (currState->el) {
762 case EL0:
763 case EL1:
764 if (isStage2) {
765 DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
766 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
767 tsz = 64 - currState->vtcr.t0sz64;
768 tg = GrainMap_tg0[currState->vtcr.tg0];
769 // ARM DDI 0487A.f D7-2148
770 // The starting level of stage 2 translation depends on
771 // VTCR_EL2.SL0 and VTCR_EL2.TG0
772 LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
773 uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
774 static const LookupLevel SLL[] = {
775 L2, L3, L3, __, // sl0 == 0
776 L1, L2, L2, __, // sl0 == 1, etc.
777 L0, L1, L1, __,
778 __, __, __, __
779 };
780 start_lookup_level = SLL[sl_tg];
781 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
782 "Cannot discern lookup level from vtcr.{sl0,tg0}");
783 ps = currState->vtcr.ps;
784 } else {
785 switch (bits(currState->vaddr, 63,48)) {
786 case 0:
787 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
788 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
789 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
790 tg = GrainMap_tg0[currState->tcr.tg0];
791 currState->hpd = currState->tcr.hpd0;
792 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
793 currState->tcr.epd0)
794 fault = true;
795 break;
796 case 0xffff:
797 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
798 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
799 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
800 tg = GrainMap_tg1[currState->tcr.tg1];
801 currState->hpd = currState->tcr.hpd1;
802 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
803 currState->tcr.epd1)
804 fault = true;
805 break;
806 default:
807 // top two bytes must be all 0s or all 1s, else invalid addr
808 fault = true;
809 }
810 ps = currState->tcr.ips;
811 }
812 break;
813 case EL2:
814 switch(bits(currState->vaddr, 63,48)) {
815 case 0:
816 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
817 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
818 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
819 tg = GrainMap_tg0[currState->tcr.tg0];
820 currState->hpd = currState->hcr.e2h ?
821 currState->tcr.hpd0 : currState->tcr.hpd;
822 break;
823
824 case 0xffff:
825 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
826 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
827 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
828 tg = GrainMap_tg1[currState->tcr.tg1];
829 currState->hpd = currState->tcr.hpd1;
830 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
831 currState->tcr.epd1 || !currState->hcr.e2h)
832 fault = true;
833 break;
834
835 default:
836 // invalid addr if top two bytes are not all 0s
837 fault = true;
838 }
839 ps = currState->tcr.ps;
840 break;
841 case EL3:
842 switch(bits(currState->vaddr, 63,48)) {
843 case 0:
844 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
845 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
846 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
847 tg = GrainMap_tg0[currState->tcr.tg0];
848 currState->hpd = currState->tcr.hpd;
849 break;
850 default:
851 // invalid addr if top two bytes are not all 0s
852 fault = true;
853 }
854 ps = currState->tcr.ps;
855 break;
856 }
857
858 const bool is_atomic = currState->req->isAtomic();
859
852 if (fault) {
853 Fault f;
854 if (currState->isFetch)
855 f = std::make_shared<PrefetchAbort>(
856 currState->vaddr_tainted,
857 ArmFault::TranslationLL + L0, isStage2,
858 ArmFault::LpaeTran);
859 else
860 f = std::make_shared<DataAbort>(
861 currState->vaddr_tainted,
862 TlbEntry::DomainType::NoAccess,
860 if (fault) {
861 Fault f;
862 if (currState->isFetch)
863 f = std::make_shared<PrefetchAbort>(
864 currState->vaddr_tainted,
865 ArmFault::TranslationLL + L0, isStage2,
866 ArmFault::LpaeTran);
867 else
868 f = std::make_shared<DataAbort>(
869 currState->vaddr_tainted,
870 TlbEntry::DomainType::NoAccess,
863 currState->isWrite,
871 is_atomic ? false : currState->isWrite,
864 ArmFault::TranslationLL + L0,
865 isStage2, ArmFault::LpaeTran);
866
867 if (currState->timing) {
868 pending = false;
869 nextWalk(currState->tc);
870 currState = NULL;
871 } else {
872 currState->tc = NULL;
873 currState->req = NULL;
874 }
875 return f;
876
877 }
878
879 if (tg == ReservedGrain) {
880 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
881 "DEFINED behavior takes this to mean 4KB granules\n");
882 tg = Grain4KB;
883 }
884
885 // Determine starting lookup level
886 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
887 // in ARM DDI 0487A. These table values correspond to the cascading tests
888 // to compute the lookup level and are of the form
889 // (grain_size + N*stride), for N = {1, 2, 3}.
890 // A value of 64 will never succeed and a value of 0 will always succeed.
891 if (start_lookup_level == MAX_LOOKUP_LEVELS) {
892 struct GrainMap {
893 GrainSize grain_size;
894 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
895 };
896 static const GrainMap GM[] = {
897 { Grain4KB, { 39, 30, 0, 0 } },
898 { Grain16KB, { 47, 36, 25, 0 } },
899 { Grain64KB, { 64, 42, 29, 0 } }
900 };
901
902 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
903
904 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
905 if (tg == GM[i].grain_size) {
906 lookup = GM[i].lookup_level_cutoff;
907 break;
908 }
909 }
910 assert(lookup);
911
912 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
913 if (tsz > lookup[L]) {
914 start_lookup_level = (LookupLevel) L;
915 break;
916 }
917 }
918 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
919 "Table walker couldn't find lookup level\n");
920 }
921
922 int stride = tg - 3;
923
924 // Determine table base address
925 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
926 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
927
928 // Determine physical address size and raise an Address Size Fault if
929 // necessary
930 int pa_range = decodePhysAddrRange64(ps);
931 // Clamp to lower limit
932 if (pa_range > physAddrRange)
933 currState->physAddrRange = physAddrRange;
934 else
935 currState->physAddrRange = pa_range;
936 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
937 DPRINTF(TLB, "Address size fault before any lookup\n");
938 Fault f;
939 if (currState->isFetch)
940 f = std::make_shared<PrefetchAbort>(
941 currState->vaddr_tainted,
942 ArmFault::AddressSizeLL + start_lookup_level,
943 isStage2,
944 ArmFault::LpaeTran);
945 else
946 f = std::make_shared<DataAbort>(
947 currState->vaddr_tainted,
948 TlbEntry::DomainType::NoAccess,
872 ArmFault::TranslationLL + L0,
873 isStage2, ArmFault::LpaeTran);
874
875 if (currState->timing) {
876 pending = false;
877 nextWalk(currState->tc);
878 currState = NULL;
879 } else {
880 currState->tc = NULL;
881 currState->req = NULL;
882 }
883 return f;
884
885 }
886
887 if (tg == ReservedGrain) {
888 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
889 "DEFINED behavior takes this to mean 4KB granules\n");
890 tg = Grain4KB;
891 }
892
893 // Determine starting lookup level
894 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
895 // in ARM DDI 0487A. These table values correspond to the cascading tests
896 // to compute the lookup level and are of the form
897 // (grain_size + N*stride), for N = {1, 2, 3}.
898 // A value of 64 will never succeed and a value of 0 will always succeed.
899 if (start_lookup_level == MAX_LOOKUP_LEVELS) {
900 struct GrainMap {
901 GrainSize grain_size;
902 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
903 };
904 static const GrainMap GM[] = {
905 { Grain4KB, { 39, 30, 0, 0 } },
906 { Grain16KB, { 47, 36, 25, 0 } },
907 { Grain64KB, { 64, 42, 29, 0 } }
908 };
909
910 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
911
912 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
913 if (tg == GM[i].grain_size) {
914 lookup = GM[i].lookup_level_cutoff;
915 break;
916 }
917 }
918 assert(lookup);
919
920 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
921 if (tsz > lookup[L]) {
922 start_lookup_level = (LookupLevel) L;
923 break;
924 }
925 }
926 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
927 "Table walker couldn't find lookup level\n");
928 }
929
930 int stride = tg - 3;
931
932 // Determine table base address
933 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
934 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
935
936 // Determine physical address size and raise an Address Size Fault if
937 // necessary
938 int pa_range = decodePhysAddrRange64(ps);
939 // Clamp to lower limit
940 if (pa_range > physAddrRange)
941 currState->physAddrRange = physAddrRange;
942 else
943 currState->physAddrRange = pa_range;
944 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
945 DPRINTF(TLB, "Address size fault before any lookup\n");
946 Fault f;
947 if (currState->isFetch)
948 f = std::make_shared<PrefetchAbort>(
949 currState->vaddr_tainted,
950 ArmFault::AddressSizeLL + start_lookup_level,
951 isStage2,
952 ArmFault::LpaeTran);
953 else
954 f = std::make_shared<DataAbort>(
955 currState->vaddr_tainted,
956 TlbEntry::DomainType::NoAccess,
949 currState->isWrite,
957 is_atomic ? false : currState->isWrite,
950 ArmFault::AddressSizeLL + start_lookup_level,
951 isStage2,
952 ArmFault::LpaeTran);
953
954
955 if (currState->timing) {
956 pending = false;
957 nextWalk(currState->tc);
958 currState = NULL;
959 } else {
960 currState->tc = NULL;
961 currState->req = NULL;
962 }
963 return f;
964
965 }
966
967 // Determine descriptor address
968 Addr desc_addr = base_addr |
969 (bits(currState->vaddr, tsz - 1,
970 stride * (3 - start_lookup_level) + tg) << 3);
971
972 // Trickbox address check
973 Fault f = testWalk(desc_addr, sizeof(uint64_t),
974 TlbEntry::DomainType::NoAccess, start_lookup_level);
975 if (f) {
976 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
977 if (currState->timing) {
978 pending = false;
979 nextWalk(currState->tc);
980 currState = NULL;
981 } else {
982 currState->tc = NULL;
983 currState->req = NULL;
984 }
985 return f;
986 }
987
988 Request::Flags flag = Request::PT_WALK;
989 if (currState->sctlr.c == 0) {
990 flag.set(Request::UNCACHEABLE);
991 }
992
993 if (currState->isSecure) {
994 flag.set(Request::SECURE);
995 }
996
997 currState->longDesc.lookupLevel = start_lookup_level;
998 currState->longDesc.aarch64 = true;
999 currState->longDesc.grainSize = tg;
1000
1001 if (currState->timing) {
1002 fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1003 sizeof(uint64_t), flag, start_lookup_level,
1004 LongDescEventByLevel[start_lookup_level], NULL);
1005 } else {
1006 fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1007 sizeof(uint64_t), flag, -1, NULL,
1008 &TableWalker::doLongDescriptor);
1009 f = currState->fault;
1010 }
1011
1012 return f;
1013}
1014
1015void
1016TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1017 uint8_t texcb, bool s)
1018{
1019 // Note: tc and sctlr local variables are hiding tc and sctrl class
1020 // variables
1021 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1022 te.shareable = false; // default value
1023 te.nonCacheable = false;
1024 te.outerShareable = false;
1025 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1026 switch(texcb) {
1027 case 0: // Stongly-ordered
1028 te.nonCacheable = true;
1029 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1030 te.shareable = true;
1031 te.innerAttrs = 1;
1032 te.outerAttrs = 0;
1033 break;
1034 case 1: // Shareable Device
1035 te.nonCacheable = true;
1036 te.mtype = TlbEntry::MemoryType::Device;
1037 te.shareable = true;
1038 te.innerAttrs = 3;
1039 te.outerAttrs = 0;
1040 break;
1041 case 2: // Outer and Inner Write-Through, no Write-Allocate
1042 te.mtype = TlbEntry::MemoryType::Normal;
1043 te.shareable = s;
1044 te.innerAttrs = 6;
1045 te.outerAttrs = bits(texcb, 1, 0);
1046 break;
1047 case 3: // Outer and Inner Write-Back, no Write-Allocate
1048 te.mtype = TlbEntry::MemoryType::Normal;
1049 te.shareable = s;
1050 te.innerAttrs = 7;
1051 te.outerAttrs = bits(texcb, 1, 0);
1052 break;
1053 case 4: // Outer and Inner Non-cacheable
1054 te.nonCacheable = true;
1055 te.mtype = TlbEntry::MemoryType::Normal;
1056 te.shareable = s;
1057 te.innerAttrs = 0;
1058 te.outerAttrs = bits(texcb, 1, 0);
1059 break;
1060 case 5: // Reserved
1061 panic("Reserved texcb value!\n");
1062 break;
1063 case 6: // Implementation Defined
1064 panic("Implementation-defined texcb value!\n");
1065 break;
1066 case 7: // Outer and Inner Write-Back, Write-Allocate
1067 te.mtype = TlbEntry::MemoryType::Normal;
1068 te.shareable = s;
1069 te.innerAttrs = 5;
1070 te.outerAttrs = 1;
1071 break;
1072 case 8: // Non-shareable Device
1073 te.nonCacheable = true;
1074 te.mtype = TlbEntry::MemoryType::Device;
1075 te.shareable = false;
1076 te.innerAttrs = 3;
1077 te.outerAttrs = 0;
1078 break;
1079 case 9 ... 15: // Reserved
1080 panic("Reserved texcb value!\n");
1081 break;
1082 case 16 ... 31: // Cacheable Memory
1083 te.mtype = TlbEntry::MemoryType::Normal;
1084 te.shareable = s;
1085 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1086 te.nonCacheable = true;
1087 te.innerAttrs = bits(texcb, 1, 0);
1088 te.outerAttrs = bits(texcb, 3, 2);
1089 break;
1090 default:
1091 panic("More than 32 states for 5 bits?\n");
1092 }
1093 } else {
1094 assert(tc);
1095 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1096 currState->tc, !currState->isSecure));
1097 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1098 currState->tc, !currState->isSecure));
1099 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1100 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1101 switch(bits(texcb, 2,0)) {
1102 case 0:
1103 curr_tr = prrr.tr0;
1104 curr_ir = nmrr.ir0;
1105 curr_or = nmrr.or0;
1106 te.outerShareable = (prrr.nos0 == 0);
1107 break;
1108 case 1:
1109 curr_tr = prrr.tr1;
1110 curr_ir = nmrr.ir1;
1111 curr_or = nmrr.or1;
1112 te.outerShareable = (prrr.nos1 == 0);
1113 break;
1114 case 2:
1115 curr_tr = prrr.tr2;
1116 curr_ir = nmrr.ir2;
1117 curr_or = nmrr.or2;
1118 te.outerShareable = (prrr.nos2 == 0);
1119 break;
1120 case 3:
1121 curr_tr = prrr.tr3;
1122 curr_ir = nmrr.ir3;
1123 curr_or = nmrr.or3;
1124 te.outerShareable = (prrr.nos3 == 0);
1125 break;
1126 case 4:
1127 curr_tr = prrr.tr4;
1128 curr_ir = nmrr.ir4;
1129 curr_or = nmrr.or4;
1130 te.outerShareable = (prrr.nos4 == 0);
1131 break;
1132 case 5:
1133 curr_tr = prrr.tr5;
1134 curr_ir = nmrr.ir5;
1135 curr_or = nmrr.or5;
1136 te.outerShareable = (prrr.nos5 == 0);
1137 break;
1138 case 6:
1139 panic("Imp defined type\n");
1140 case 7:
1141 curr_tr = prrr.tr7;
1142 curr_ir = nmrr.ir7;
1143 curr_or = nmrr.or7;
1144 te.outerShareable = (prrr.nos7 == 0);
1145 break;
1146 }
1147
1148 switch(curr_tr) {
1149 case 0:
1150 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1151 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1152 te.nonCacheable = true;
1153 te.innerAttrs = 1;
1154 te.outerAttrs = 0;
1155 te.shareable = true;
1156 break;
1157 case 1:
1158 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1159 prrr.ds1, prrr.ds0, s);
1160 te.mtype = TlbEntry::MemoryType::Device;
1161 te.nonCacheable = true;
1162 te.innerAttrs = 3;
1163 te.outerAttrs = 0;
1164 if (prrr.ds1 && s)
1165 te.shareable = true;
1166 if (prrr.ds0 && !s)
1167 te.shareable = true;
1168 break;
1169 case 2:
1170 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1171 prrr.ns1, prrr.ns0, s);
1172 te.mtype = TlbEntry::MemoryType::Normal;
1173 if (prrr.ns1 && s)
1174 te.shareable = true;
1175 if (prrr.ns0 && !s)
1176 te.shareable = true;
1177 break;
1178 case 3:
1179 panic("Reserved type");
1180 }
1181
1182 if (te.mtype == TlbEntry::MemoryType::Normal){
1183 switch(curr_ir) {
1184 case 0:
1185 te.nonCacheable = true;
1186 te.innerAttrs = 0;
1187 break;
1188 case 1:
1189 te.innerAttrs = 5;
1190 break;
1191 case 2:
1192 te.innerAttrs = 6;
1193 break;
1194 case 3:
1195 te.innerAttrs = 7;
1196 break;
1197 }
1198
1199 switch(curr_or) {
1200 case 0:
1201 te.nonCacheable = true;
1202 te.outerAttrs = 0;
1203 break;
1204 case 1:
1205 te.outerAttrs = 1;
1206 break;
1207 case 2:
1208 te.outerAttrs = 2;
1209 break;
1210 case 3:
1211 te.outerAttrs = 3;
1212 break;
1213 }
1214 }
1215 }
1216 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1217 "outerAttrs: %d\n",
1218 te.shareable, te.innerAttrs, te.outerAttrs);
1219 te.setAttributes(false);
1220}
1221
1222void
1223TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1224 LongDescriptor &lDescriptor)
1225{
1226 assert(_haveLPAE);
1227
1228 uint8_t attr;
1229 uint8_t sh = lDescriptor.sh();
1230 // Different format and source of attributes if this is a stage 2
1231 // translation
1232 if (isStage2) {
1233 attr = lDescriptor.memAttr();
1234 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1235 uint8_t attr_1_0 = attr & 0x3;
1236
1237 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1238
1239 if (attr_3_2 == 0) {
1240 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1241 : TlbEntry::MemoryType::Device;
1242 te.outerAttrs = 0;
1243 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1244 te.nonCacheable = true;
1245 } else {
1246 te.mtype = TlbEntry::MemoryType::Normal;
1247 te.outerAttrs = attr_3_2 == 1 ? 0 :
1248 attr_3_2 == 2 ? 2 : 1;
1249 te.innerAttrs = attr_1_0 == 1 ? 0 :
1250 attr_1_0 == 2 ? 6 : 5;
1251 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1252 }
1253 } else {
1254 uint8_t attrIndx = lDescriptor.attrIndx();
1255
1256 // LPAE always uses remapping of memory attributes, irrespective of the
1257 // value of SCTLR.TRE
1258 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1259 int reg_as_int = snsBankedIndex(reg, currState->tc,
1260 !currState->isSecure);
1261 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1262 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1263 uint8_t attr_7_4 = bits(attr, 7, 4);
1264 uint8_t attr_3_0 = bits(attr, 3, 0);
1265 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1266
1267 // Note: the memory subsystem only cares about the 'cacheable' memory
1268 // attribute. The other attributes are only used to fill the PAR register
1269 // accordingly to provide the illusion of full support
1270 te.nonCacheable = false;
1271
1272 switch (attr_7_4) {
1273 case 0x0:
1274 // Strongly-ordered or Device memory
1275 if (attr_3_0 == 0x0)
1276 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1277 else if (attr_3_0 == 0x4)
1278 te.mtype = TlbEntry::MemoryType::Device;
1279 else
1280 panic("Unpredictable behavior\n");
1281 te.nonCacheable = true;
1282 te.outerAttrs = 0;
1283 break;
1284 case 0x4:
1285 // Normal memory, Outer Non-cacheable
1286 te.mtype = TlbEntry::MemoryType::Normal;
1287 te.outerAttrs = 0;
1288 if (attr_3_0 == 0x4)
1289 // Inner Non-cacheable
1290 te.nonCacheable = true;
1291 else if (attr_3_0 < 0x8)
1292 panic("Unpredictable behavior\n");
1293 break;
1294 case 0x8:
1295 case 0x9:
1296 case 0xa:
1297 case 0xb:
1298 case 0xc:
1299 case 0xd:
1300 case 0xe:
1301 case 0xf:
1302 if (attr_7_4 & 0x4) {
1303 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1304 } else {
1305 te.outerAttrs = 0x2;
1306 }
1307 // Normal memory, Outer Cacheable
1308 te.mtype = TlbEntry::MemoryType::Normal;
1309 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1310 panic("Unpredictable behavior\n");
1311 break;
1312 default:
1313 panic("Unpredictable behavior\n");
1314 break;
1315 }
1316
1317 switch (attr_3_0) {
1318 case 0x0:
1319 te.innerAttrs = 0x1;
1320 break;
1321 case 0x4:
1322 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1323 break;
1324 case 0x8:
1325 case 0x9:
1326 case 0xA:
1327 case 0xB:
1328 te.innerAttrs = 6;
1329 break;
1330 case 0xC:
1331 case 0xD:
1332 case 0xE:
1333 case 0xF:
1334 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1335 break;
1336 default:
1337 panic("Unpredictable behavior\n");
1338 break;
1339 }
1340 }
1341
1342 te.outerShareable = sh == 2;
1343 te.shareable = (sh & 0x2) ? true : false;
1344 te.setAttributes(true);
1345 te.attributes |= (uint64_t) attr << 56;
1346}
1347
1348void
1349TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1350 LongDescriptor &lDescriptor)
1351{
1352 uint8_t attr;
1353 uint8_t attr_hi;
1354 uint8_t attr_lo;
1355 uint8_t sh = lDescriptor.sh();
1356
1357 if (isStage2) {
1358 attr = lDescriptor.memAttr();
1359 uint8_t attr_hi = (attr >> 2) & 0x3;
1360 uint8_t attr_lo = attr & 0x3;
1361
1362 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1363
1364 if (attr_hi == 0) {
1365 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1366 : TlbEntry::MemoryType::Device;
1367 te.outerAttrs = 0;
1368 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1369 te.nonCacheable = true;
1370 } else {
1371 te.mtype = TlbEntry::MemoryType::Normal;
1372 te.outerAttrs = attr_hi == 1 ? 0 :
1373 attr_hi == 2 ? 2 : 1;
1374 te.innerAttrs = attr_lo == 1 ? 0 :
1375 attr_lo == 2 ? 6 : 5;
1376 // Treat write-through memory as uncacheable, this is safe
1377 // but for performance reasons not optimal.
1378 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1379 (attr_lo == 1) || (attr_lo == 2);
1380 }
1381 } else {
1382 uint8_t attrIndx = lDescriptor.attrIndx();
1383
1384 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1385
1386 // Select MAIR
1387 uint64_t mair;
1388 switch (currState->el) {
1389 case EL0:
1390 case EL1:
1391 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1392 break;
1393 case EL2:
1394 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1395 break;
1396 case EL3:
1397 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1398 break;
1399 default:
1400 panic("Invalid exception level");
1401 break;
1402 }
1403
1404 // Select attributes
1405 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1406 attr_lo = bits(attr, 3, 0);
1407 attr_hi = bits(attr, 7, 4);
1408
1409 // Memory type
1410 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1411
1412 // Cacheability
1413 te.nonCacheable = false;
1414 if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1415 te.nonCacheable = true;
1416 }
1417 // Treat write-through memory as uncacheable, this is safe
1418 // but for performance reasons not optimal.
1419 switch (attr_hi) {
1420 case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1421 case 0x4: // Normal memory, Outer Non-cacheable
1422 case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1423 te.nonCacheable = true;
1424 }
1425 switch (attr_lo) {
1426 case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1427 case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1428 warn_if(!attr_hi, "Unpredictable behavior");
1429 M5_FALLTHROUGH;
1430 case 0x4: // Device-nGnRE memory or
1431 // Normal memory, Inner Non-cacheable
1432 case 0x8: // Device-nGRE memory or
1433 // Normal memory, Inner Write-through non-transient
1434 te.nonCacheable = true;
1435 }
1436
1437 te.shareable = sh == 2;
1438 te.outerShareable = (sh & 0x2) ? true : false;
1439 // Attributes formatted according to the 64-bit PAR
1440 te.attributes = ((uint64_t) attr << 56) |
1441 (1 << 11) | // LPAE bit
1442 (te.ns << 9) | // NS bit
1443 (sh << 7);
1444 }
1445}
1446
1447void
1448TableWalker::doL1Descriptor()
1449{
1450 if (currState->fault != NoFault) {
1451 return;
1452 }
1453
1454 currState->l1Desc.data = htog(currState->l1Desc.data,
1455 byteOrder(currState->tc));
1456
1457 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1458 currState->vaddr_tainted, currState->l1Desc.data);
1459 TlbEntry te;
1460
958 ArmFault::AddressSizeLL + start_lookup_level,
959 isStage2,
960 ArmFault::LpaeTran);
961
962
963 if (currState->timing) {
964 pending = false;
965 nextWalk(currState->tc);
966 currState = NULL;
967 } else {
968 currState->tc = NULL;
969 currState->req = NULL;
970 }
971 return f;
972
973 }
974
975 // Determine descriptor address
976 Addr desc_addr = base_addr |
977 (bits(currState->vaddr, tsz - 1,
978 stride * (3 - start_lookup_level) + tg) << 3);
979
980 // Trickbox address check
981 Fault f = testWalk(desc_addr, sizeof(uint64_t),
982 TlbEntry::DomainType::NoAccess, start_lookup_level);
983 if (f) {
984 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
985 if (currState->timing) {
986 pending = false;
987 nextWalk(currState->tc);
988 currState = NULL;
989 } else {
990 currState->tc = NULL;
991 currState->req = NULL;
992 }
993 return f;
994 }
995
996 Request::Flags flag = Request::PT_WALK;
997 if (currState->sctlr.c == 0) {
998 flag.set(Request::UNCACHEABLE);
999 }
1000
1001 if (currState->isSecure) {
1002 flag.set(Request::SECURE);
1003 }
1004
1005 currState->longDesc.lookupLevel = start_lookup_level;
1006 currState->longDesc.aarch64 = true;
1007 currState->longDesc.grainSize = tg;
1008
1009 if (currState->timing) {
1010 fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1011 sizeof(uint64_t), flag, start_lookup_level,
1012 LongDescEventByLevel[start_lookup_level], NULL);
1013 } else {
1014 fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1015 sizeof(uint64_t), flag, -1, NULL,
1016 &TableWalker::doLongDescriptor);
1017 f = currState->fault;
1018 }
1019
1020 return f;
1021}
1022
1023void
1024TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1025 uint8_t texcb, bool s)
1026{
1027 // Note: tc and sctlr local variables are hiding tc and sctrl class
1028 // variables
1029 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1030 te.shareable = false; // default value
1031 te.nonCacheable = false;
1032 te.outerShareable = false;
1033 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1034 switch(texcb) {
1035 case 0: // Stongly-ordered
1036 te.nonCacheable = true;
1037 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1038 te.shareable = true;
1039 te.innerAttrs = 1;
1040 te.outerAttrs = 0;
1041 break;
1042 case 1: // Shareable Device
1043 te.nonCacheable = true;
1044 te.mtype = TlbEntry::MemoryType::Device;
1045 te.shareable = true;
1046 te.innerAttrs = 3;
1047 te.outerAttrs = 0;
1048 break;
1049 case 2: // Outer and Inner Write-Through, no Write-Allocate
1050 te.mtype = TlbEntry::MemoryType::Normal;
1051 te.shareable = s;
1052 te.innerAttrs = 6;
1053 te.outerAttrs = bits(texcb, 1, 0);
1054 break;
1055 case 3: // Outer and Inner Write-Back, no Write-Allocate
1056 te.mtype = TlbEntry::MemoryType::Normal;
1057 te.shareable = s;
1058 te.innerAttrs = 7;
1059 te.outerAttrs = bits(texcb, 1, 0);
1060 break;
1061 case 4: // Outer and Inner Non-cacheable
1062 te.nonCacheable = true;
1063 te.mtype = TlbEntry::MemoryType::Normal;
1064 te.shareable = s;
1065 te.innerAttrs = 0;
1066 te.outerAttrs = bits(texcb, 1, 0);
1067 break;
1068 case 5: // Reserved
1069 panic("Reserved texcb value!\n");
1070 break;
1071 case 6: // Implementation Defined
1072 panic("Implementation-defined texcb value!\n");
1073 break;
1074 case 7: // Outer and Inner Write-Back, Write-Allocate
1075 te.mtype = TlbEntry::MemoryType::Normal;
1076 te.shareable = s;
1077 te.innerAttrs = 5;
1078 te.outerAttrs = 1;
1079 break;
1080 case 8: // Non-shareable Device
1081 te.nonCacheable = true;
1082 te.mtype = TlbEntry::MemoryType::Device;
1083 te.shareable = false;
1084 te.innerAttrs = 3;
1085 te.outerAttrs = 0;
1086 break;
1087 case 9 ... 15: // Reserved
1088 panic("Reserved texcb value!\n");
1089 break;
1090 case 16 ... 31: // Cacheable Memory
1091 te.mtype = TlbEntry::MemoryType::Normal;
1092 te.shareable = s;
1093 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1094 te.nonCacheable = true;
1095 te.innerAttrs = bits(texcb, 1, 0);
1096 te.outerAttrs = bits(texcb, 3, 2);
1097 break;
1098 default:
1099 panic("More than 32 states for 5 bits?\n");
1100 }
1101 } else {
1102 assert(tc);
1103 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1104 currState->tc, !currState->isSecure));
1105 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1106 currState->tc, !currState->isSecure));
1107 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1108 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1109 switch(bits(texcb, 2,0)) {
1110 case 0:
1111 curr_tr = prrr.tr0;
1112 curr_ir = nmrr.ir0;
1113 curr_or = nmrr.or0;
1114 te.outerShareable = (prrr.nos0 == 0);
1115 break;
1116 case 1:
1117 curr_tr = prrr.tr1;
1118 curr_ir = nmrr.ir1;
1119 curr_or = nmrr.or1;
1120 te.outerShareable = (prrr.nos1 == 0);
1121 break;
1122 case 2:
1123 curr_tr = prrr.tr2;
1124 curr_ir = nmrr.ir2;
1125 curr_or = nmrr.or2;
1126 te.outerShareable = (prrr.nos2 == 0);
1127 break;
1128 case 3:
1129 curr_tr = prrr.tr3;
1130 curr_ir = nmrr.ir3;
1131 curr_or = nmrr.or3;
1132 te.outerShareable = (prrr.nos3 == 0);
1133 break;
1134 case 4:
1135 curr_tr = prrr.tr4;
1136 curr_ir = nmrr.ir4;
1137 curr_or = nmrr.or4;
1138 te.outerShareable = (prrr.nos4 == 0);
1139 break;
1140 case 5:
1141 curr_tr = prrr.tr5;
1142 curr_ir = nmrr.ir5;
1143 curr_or = nmrr.or5;
1144 te.outerShareable = (prrr.nos5 == 0);
1145 break;
1146 case 6:
1147 panic("Imp defined type\n");
1148 case 7:
1149 curr_tr = prrr.tr7;
1150 curr_ir = nmrr.ir7;
1151 curr_or = nmrr.or7;
1152 te.outerShareable = (prrr.nos7 == 0);
1153 break;
1154 }
1155
1156 switch(curr_tr) {
1157 case 0:
1158 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1159 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1160 te.nonCacheable = true;
1161 te.innerAttrs = 1;
1162 te.outerAttrs = 0;
1163 te.shareable = true;
1164 break;
1165 case 1:
1166 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1167 prrr.ds1, prrr.ds0, s);
1168 te.mtype = TlbEntry::MemoryType::Device;
1169 te.nonCacheable = true;
1170 te.innerAttrs = 3;
1171 te.outerAttrs = 0;
1172 if (prrr.ds1 && s)
1173 te.shareable = true;
1174 if (prrr.ds0 && !s)
1175 te.shareable = true;
1176 break;
1177 case 2:
1178 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1179 prrr.ns1, prrr.ns0, s);
1180 te.mtype = TlbEntry::MemoryType::Normal;
1181 if (prrr.ns1 && s)
1182 te.shareable = true;
1183 if (prrr.ns0 && !s)
1184 te.shareable = true;
1185 break;
1186 case 3:
1187 panic("Reserved type");
1188 }
1189
1190 if (te.mtype == TlbEntry::MemoryType::Normal){
1191 switch(curr_ir) {
1192 case 0:
1193 te.nonCacheable = true;
1194 te.innerAttrs = 0;
1195 break;
1196 case 1:
1197 te.innerAttrs = 5;
1198 break;
1199 case 2:
1200 te.innerAttrs = 6;
1201 break;
1202 case 3:
1203 te.innerAttrs = 7;
1204 break;
1205 }
1206
1207 switch(curr_or) {
1208 case 0:
1209 te.nonCacheable = true;
1210 te.outerAttrs = 0;
1211 break;
1212 case 1:
1213 te.outerAttrs = 1;
1214 break;
1215 case 2:
1216 te.outerAttrs = 2;
1217 break;
1218 case 3:
1219 te.outerAttrs = 3;
1220 break;
1221 }
1222 }
1223 }
1224 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1225 "outerAttrs: %d\n",
1226 te.shareable, te.innerAttrs, te.outerAttrs);
1227 te.setAttributes(false);
1228}
1229
1230void
1231TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1232 LongDescriptor &lDescriptor)
1233{
1234 assert(_haveLPAE);
1235
1236 uint8_t attr;
1237 uint8_t sh = lDescriptor.sh();
1238 // Different format and source of attributes if this is a stage 2
1239 // translation
1240 if (isStage2) {
1241 attr = lDescriptor.memAttr();
1242 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1243 uint8_t attr_1_0 = attr & 0x3;
1244
1245 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1246
1247 if (attr_3_2 == 0) {
1248 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1249 : TlbEntry::MemoryType::Device;
1250 te.outerAttrs = 0;
1251 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1252 te.nonCacheable = true;
1253 } else {
1254 te.mtype = TlbEntry::MemoryType::Normal;
1255 te.outerAttrs = attr_3_2 == 1 ? 0 :
1256 attr_3_2 == 2 ? 2 : 1;
1257 te.innerAttrs = attr_1_0 == 1 ? 0 :
1258 attr_1_0 == 2 ? 6 : 5;
1259 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1260 }
1261 } else {
1262 uint8_t attrIndx = lDescriptor.attrIndx();
1263
1264 // LPAE always uses remapping of memory attributes, irrespective of the
1265 // value of SCTLR.TRE
1266 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1267 int reg_as_int = snsBankedIndex(reg, currState->tc,
1268 !currState->isSecure);
1269 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1270 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1271 uint8_t attr_7_4 = bits(attr, 7, 4);
1272 uint8_t attr_3_0 = bits(attr, 3, 0);
1273 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1274
1275 // Note: the memory subsystem only cares about the 'cacheable' memory
1276 // attribute. The other attributes are only used to fill the PAR register
1277 // accordingly to provide the illusion of full support
1278 te.nonCacheable = false;
1279
1280 switch (attr_7_4) {
1281 case 0x0:
1282 // Strongly-ordered or Device memory
1283 if (attr_3_0 == 0x0)
1284 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1285 else if (attr_3_0 == 0x4)
1286 te.mtype = TlbEntry::MemoryType::Device;
1287 else
1288 panic("Unpredictable behavior\n");
1289 te.nonCacheable = true;
1290 te.outerAttrs = 0;
1291 break;
1292 case 0x4:
1293 // Normal memory, Outer Non-cacheable
1294 te.mtype = TlbEntry::MemoryType::Normal;
1295 te.outerAttrs = 0;
1296 if (attr_3_0 == 0x4)
1297 // Inner Non-cacheable
1298 te.nonCacheable = true;
1299 else if (attr_3_0 < 0x8)
1300 panic("Unpredictable behavior\n");
1301 break;
1302 case 0x8:
1303 case 0x9:
1304 case 0xa:
1305 case 0xb:
1306 case 0xc:
1307 case 0xd:
1308 case 0xe:
1309 case 0xf:
1310 if (attr_7_4 & 0x4) {
1311 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1312 } else {
1313 te.outerAttrs = 0x2;
1314 }
1315 // Normal memory, Outer Cacheable
1316 te.mtype = TlbEntry::MemoryType::Normal;
1317 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1318 panic("Unpredictable behavior\n");
1319 break;
1320 default:
1321 panic("Unpredictable behavior\n");
1322 break;
1323 }
1324
1325 switch (attr_3_0) {
1326 case 0x0:
1327 te.innerAttrs = 0x1;
1328 break;
1329 case 0x4:
1330 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1331 break;
1332 case 0x8:
1333 case 0x9:
1334 case 0xA:
1335 case 0xB:
1336 te.innerAttrs = 6;
1337 break;
1338 case 0xC:
1339 case 0xD:
1340 case 0xE:
1341 case 0xF:
1342 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1343 break;
1344 default:
1345 panic("Unpredictable behavior\n");
1346 break;
1347 }
1348 }
1349
1350 te.outerShareable = sh == 2;
1351 te.shareable = (sh & 0x2) ? true : false;
1352 te.setAttributes(true);
1353 te.attributes |= (uint64_t) attr << 56;
1354}
1355
1356void
1357TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1358 LongDescriptor &lDescriptor)
1359{
1360 uint8_t attr;
1361 uint8_t attr_hi;
1362 uint8_t attr_lo;
1363 uint8_t sh = lDescriptor.sh();
1364
1365 if (isStage2) {
1366 attr = lDescriptor.memAttr();
1367 uint8_t attr_hi = (attr >> 2) & 0x3;
1368 uint8_t attr_lo = attr & 0x3;
1369
1370 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1371
1372 if (attr_hi == 0) {
1373 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1374 : TlbEntry::MemoryType::Device;
1375 te.outerAttrs = 0;
1376 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1377 te.nonCacheable = true;
1378 } else {
1379 te.mtype = TlbEntry::MemoryType::Normal;
1380 te.outerAttrs = attr_hi == 1 ? 0 :
1381 attr_hi == 2 ? 2 : 1;
1382 te.innerAttrs = attr_lo == 1 ? 0 :
1383 attr_lo == 2 ? 6 : 5;
1384 // Treat write-through memory as uncacheable, this is safe
1385 // but for performance reasons not optimal.
1386 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1387 (attr_lo == 1) || (attr_lo == 2);
1388 }
1389 } else {
1390 uint8_t attrIndx = lDescriptor.attrIndx();
1391
1392 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1393
1394 // Select MAIR
1395 uint64_t mair;
1396 switch (currState->el) {
1397 case EL0:
1398 case EL1:
1399 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1400 break;
1401 case EL2:
1402 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1403 break;
1404 case EL3:
1405 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1406 break;
1407 default:
1408 panic("Invalid exception level");
1409 break;
1410 }
1411
1412 // Select attributes
1413 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1414 attr_lo = bits(attr, 3, 0);
1415 attr_hi = bits(attr, 7, 4);
1416
1417 // Memory type
1418 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1419
1420 // Cacheability
1421 te.nonCacheable = false;
1422 if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1423 te.nonCacheable = true;
1424 }
1425 // Treat write-through memory as uncacheable, this is safe
1426 // but for performance reasons not optimal.
1427 switch (attr_hi) {
1428 case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1429 case 0x4: // Normal memory, Outer Non-cacheable
1430 case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1431 te.nonCacheable = true;
1432 }
1433 switch (attr_lo) {
1434 case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1435 case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1436 warn_if(!attr_hi, "Unpredictable behavior");
1437 M5_FALLTHROUGH;
1438 case 0x4: // Device-nGnRE memory or
1439 // Normal memory, Inner Non-cacheable
1440 case 0x8: // Device-nGRE memory or
1441 // Normal memory, Inner Write-through non-transient
1442 te.nonCacheable = true;
1443 }
1444
1445 te.shareable = sh == 2;
1446 te.outerShareable = (sh & 0x2) ? true : false;
1447 // Attributes formatted according to the 64-bit PAR
1448 te.attributes = ((uint64_t) attr << 56) |
1449 (1 << 11) | // LPAE bit
1450 (te.ns << 9) | // NS bit
1451 (sh << 7);
1452 }
1453}
1454
1455void
1456TableWalker::doL1Descriptor()
1457{
1458 if (currState->fault != NoFault) {
1459 return;
1460 }
1461
1462 currState->l1Desc.data = htog(currState->l1Desc.data,
1463 byteOrder(currState->tc));
1464
1465 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1466 currState->vaddr_tainted, currState->l1Desc.data);
1467 TlbEntry te;
1468
1469 const bool is_atomic = currState->req->isAtomic();
1470
1461 switch (currState->l1Desc.type()) {
1462 case L1Descriptor::Ignore:
1463 case L1Descriptor::Reserved:
1464 if (!currState->timing) {
1465 currState->tc = NULL;
1466 currState->req = NULL;
1467 }
1468 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1469 if (currState->isFetch)
1470 currState->fault =
1471 std::make_shared<PrefetchAbort>(
1472 currState->vaddr_tainted,
1473 ArmFault::TranslationLL + L1,
1474 isStage2,
1475 ArmFault::VmsaTran);
1476 else
1477 currState->fault =
1478 std::make_shared<DataAbort>(
1479 currState->vaddr_tainted,
1480 TlbEntry::DomainType::NoAccess,
1471 switch (currState->l1Desc.type()) {
1472 case L1Descriptor::Ignore:
1473 case L1Descriptor::Reserved:
1474 if (!currState->timing) {
1475 currState->tc = NULL;
1476 currState->req = NULL;
1477 }
1478 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1479 if (currState->isFetch)
1480 currState->fault =
1481 std::make_shared<PrefetchAbort>(
1482 currState->vaddr_tainted,
1483 ArmFault::TranslationLL + L1,
1484 isStage2,
1485 ArmFault::VmsaTran);
1486 else
1487 currState->fault =
1488 std::make_shared<DataAbort>(
1489 currState->vaddr_tainted,
1490 TlbEntry::DomainType::NoAccess,
1481 currState->isWrite,
1491 is_atomic ? false : currState->isWrite,
1482 ArmFault::TranslationLL + L1, isStage2,
1483 ArmFault::VmsaTran);
1484 return;
1485 case L1Descriptor::Section:
1486 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1487 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1488 * enabled if set, do l1.Desc.setAp0() instead of generating
1489 * AccessFlag0
1490 */
1491
1492 currState->fault = std::make_shared<DataAbort>(
1493 currState->vaddr_tainted,
1494 currState->l1Desc.domain(),
1492 ArmFault::TranslationLL + L1, isStage2,
1493 ArmFault::VmsaTran);
1494 return;
1495 case L1Descriptor::Section:
1496 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1497 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1498 * enabled if set, do l1.Desc.setAp0() instead of generating
1499 * AccessFlag0
1500 */
1501
1502 currState->fault = std::make_shared<DataAbort>(
1503 currState->vaddr_tainted,
1504 currState->l1Desc.domain(),
1495 currState->isWrite,
1505 is_atomic ? false : currState->isWrite,
1496 ArmFault::AccessFlagLL + L1,
1497 isStage2,
1498 ArmFault::VmsaTran);
1499 }
1500 if (currState->l1Desc.supersection()) {
1501 panic("Haven't implemented supersections\n");
1502 }
1503 insertTableEntry(currState->l1Desc, false);
1504 return;
1505 case L1Descriptor::PageTable:
1506 {
1507 Addr l2desc_addr;
1508 l2desc_addr = currState->l1Desc.l2Addr() |
1509 (bits(currState->vaddr, 19, 12) << 2);
1510 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1511 l2desc_addr, currState->isSecure ? "s" : "ns");
1512
1513 // Trickbox address check
1514 currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1515 currState->l1Desc.domain(), L2);
1516
1517 if (currState->fault) {
1518 if (!currState->timing) {
1519 currState->tc = NULL;
1520 currState->req = NULL;
1521 }
1522 return;
1523 }
1524
1525 Request::Flags flag = Request::PT_WALK;
1526 if (currState->isSecure)
1527 flag.set(Request::SECURE);
1528
1529 bool delayed;
1530 delayed = fetchDescriptor(l2desc_addr,
1531 (uint8_t*)&currState->l2Desc.data,
1532 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1533 &TableWalker::doL2Descriptor);
1534 if (delayed) {
1535 currState->delayed = true;
1536 }
1537
1538 return;
1539 }
1540 default:
1541 panic("A new type in a 2 bit field?\n");
1542 }
1543}
1544
1545Fault
1546TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1547{
1548 if (currState->isFetch) {
1549 return std::make_shared<PrefetchAbort>(
1550 currState->vaddr_tainted,
1551 src + currState->longDesc.lookupLevel,
1552 isStage2,
1553 ArmFault::LpaeTran);
1554 } else {
1555 return std::make_shared<DataAbort>(
1556 currState->vaddr_tainted,
1557 TlbEntry::DomainType::NoAccess,
1506 ArmFault::AccessFlagLL + L1,
1507 isStage2,
1508 ArmFault::VmsaTran);
1509 }
1510 if (currState->l1Desc.supersection()) {
1511 panic("Haven't implemented supersections\n");
1512 }
1513 insertTableEntry(currState->l1Desc, false);
1514 return;
1515 case L1Descriptor::PageTable:
1516 {
1517 Addr l2desc_addr;
1518 l2desc_addr = currState->l1Desc.l2Addr() |
1519 (bits(currState->vaddr, 19, 12) << 2);
1520 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1521 l2desc_addr, currState->isSecure ? "s" : "ns");
1522
1523 // Trickbox address check
1524 currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1525 currState->l1Desc.domain(), L2);
1526
1527 if (currState->fault) {
1528 if (!currState->timing) {
1529 currState->tc = NULL;
1530 currState->req = NULL;
1531 }
1532 return;
1533 }
1534
1535 Request::Flags flag = Request::PT_WALK;
1536 if (currState->isSecure)
1537 flag.set(Request::SECURE);
1538
1539 bool delayed;
1540 delayed = fetchDescriptor(l2desc_addr,
1541 (uint8_t*)&currState->l2Desc.data,
1542 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1543 &TableWalker::doL2Descriptor);
1544 if (delayed) {
1545 currState->delayed = true;
1546 }
1547
1548 return;
1549 }
1550 default:
1551 panic("A new type in a 2 bit field?\n");
1552 }
1553}
1554
1555Fault
1556TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1557{
1558 if (currState->isFetch) {
1559 return std::make_shared<PrefetchAbort>(
1560 currState->vaddr_tainted,
1561 src + currState->longDesc.lookupLevel,
1562 isStage2,
1563 ArmFault::LpaeTran);
1564 } else {
1565 return std::make_shared<DataAbort>(
1566 currState->vaddr_tainted,
1567 TlbEntry::DomainType::NoAccess,
1558 currState->isWrite,
1568 currState->req->isAtomic() ? false : currState->isWrite,
1559 src + currState->longDesc.lookupLevel,
1560 isStage2,
1561 ArmFault::LpaeTran);
1562 }
1563}
1564
1565void
1566TableWalker::doLongDescriptor()
1567{
1568 if (currState->fault != NoFault) {
1569 return;
1570 }
1571
1572 currState->longDesc.data = htog(currState->longDesc.data,
1573 byteOrder(currState->tc));
1574
1575 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1576 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1577 currState->longDesc.data,
1578 currState->aarch64 ? "AArch64" : "long-desc.");
1579
1580 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1581 (currState->longDesc.type() == LongDescriptor::Page)) {
1582 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1583 "xn: %d, ap: %d, af: %d, type: %d\n",
1584 currState->longDesc.lookupLevel,
1585 currState->longDesc.data,
1586 currState->longDesc.pxn(),
1587 currState->longDesc.xn(),
1588 currState->longDesc.ap(),
1589 currState->longDesc.af(),
1590 currState->longDesc.type());
1591 } else {
1592 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1593 currState->longDesc.lookupLevel,
1594 currState->longDesc.data,
1595 currState->longDesc.type());
1596 }
1597
1598 TlbEntry te;
1599
1600 switch (currState->longDesc.type()) {
1601 case LongDescriptor::Invalid:
1569 src + currState->longDesc.lookupLevel,
1570 isStage2,
1571 ArmFault::LpaeTran);
1572 }
1573}
1574
1575void
1576TableWalker::doLongDescriptor()
1577{
1578 if (currState->fault != NoFault) {
1579 return;
1580 }
1581
1582 currState->longDesc.data = htog(currState->longDesc.data,
1583 byteOrder(currState->tc));
1584
1585 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1586 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1587 currState->longDesc.data,
1588 currState->aarch64 ? "AArch64" : "long-desc.");
1589
1590 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1591 (currState->longDesc.type() == LongDescriptor::Page)) {
1592 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1593 "xn: %d, ap: %d, af: %d, type: %d\n",
1594 currState->longDesc.lookupLevel,
1595 currState->longDesc.data,
1596 currState->longDesc.pxn(),
1597 currState->longDesc.xn(),
1598 currState->longDesc.ap(),
1599 currState->longDesc.af(),
1600 currState->longDesc.type());
1601 } else {
1602 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1603 currState->longDesc.lookupLevel,
1604 currState->longDesc.data,
1605 currState->longDesc.type());
1606 }
1607
1608 TlbEntry te;
1609
1610 switch (currState->longDesc.type()) {
1611 case LongDescriptor::Invalid:
1602 if (!currState->timing) {
1603 currState->tc = NULL;
1604 currState->req = NULL;
1605 }
1606
1607 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1608 currState->longDesc.lookupLevel,
1609 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1610
1611 currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1612 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1613 currState->longDesc.lookupLevel,
1614 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1615
1616 currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1617 if (!currState->timing) {
1618 currState->tc = NULL;
1619 currState->req = NULL;
1620 }
1612 return;
1613
1614 case LongDescriptor::Block:
1615 case LongDescriptor::Page:
1616 {
1617 auto fault_source = ArmFault::FaultSourceInvalid;
1618 // Check for address size fault
1619 if (checkAddrSizeFaultAArch64(
1620 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1621 currState->longDesc.offsetBits()),
1622 currState->physAddrRange)) {
1623
1624 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1625 currState->longDesc.lookupLevel);
1626 fault_source = ArmFault::AddressSizeLL;
1627
1628 // Check for access fault
1629 } else if (currState->longDesc.af() == 0) {
1630
1631 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1632 currState->longDesc.lookupLevel);
1633 fault_source = ArmFault::AccessFlagLL;
1634 }
1635
1636 if (fault_source != ArmFault::FaultSourceInvalid) {
1637 currState->fault = generateLongDescFault(fault_source);
1638 } else {
1639 insertTableEntry(currState->longDesc, true);
1640 }
1641 }
1642 return;
1643 case LongDescriptor::Table:
1644 {
1645 // Set hierarchical permission flags
1646 currState->secureLookup = currState->secureLookup &&
1647 currState->longDesc.secureTable();
1648 currState->rwTable = currState->rwTable &&
1649 (currState->longDesc.rwTable() || currState->hpd);
1650 currState->userTable = currState->userTable &&
1651 (currState->longDesc.userTable() || currState->hpd);
1652 currState->xnTable = currState->xnTable ||
1653 (currState->longDesc.xnTable() && !currState->hpd);
1654 currState->pxnTable = currState->pxnTable ||
1655 (currState->longDesc.pxnTable() && !currState->hpd);
1656
1657 // Set up next level lookup
1658 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1659 currState->vaddr);
1660
1661 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1662 currState->longDesc.lookupLevel,
1663 currState->longDesc.lookupLevel + 1,
1664 next_desc_addr,
1665 currState->secureLookup ? "s" : "ns");
1666
1667 // Check for address size fault
1668 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1669 next_desc_addr, currState->physAddrRange)) {
1670 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1671 currState->longDesc.lookupLevel);
1672
1673 currState->fault = generateLongDescFault(
1674 ArmFault::AddressSizeLL);
1675 return;
1676 }
1677
1678 // Trickbox address check
1679 currState->fault = testWalk(
1680 next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1681 toLookupLevel(currState->longDesc.lookupLevel +1));
1682
1683 if (currState->fault) {
1684 if (!currState->timing) {
1685 currState->tc = NULL;
1686 currState->req = NULL;
1687 }
1688 return;
1689 }
1690
1691 Request::Flags flag = Request::PT_WALK;
1692 if (currState->secureLookup)
1693 flag.set(Request::SECURE);
1694
1695 LookupLevel L = currState->longDesc.lookupLevel =
1696 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1697 Event *event = NULL;
1698 switch (L) {
1699 case L1:
1700 assert(currState->aarch64);
1701 case L2:
1702 case L3:
1703 event = LongDescEventByLevel[L];
1704 break;
1705 default:
1706 panic("Wrong lookup level in table walk\n");
1707 break;
1708 }
1709
1710 bool delayed;
1711 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1712 sizeof(uint64_t), flag, -1, event,
1713 &TableWalker::doLongDescriptor);
1714 if (delayed) {
1715 currState->delayed = true;
1716 }
1717 }
1718 return;
1719 default:
1720 panic("A new type in a 2 bit field?\n");
1721 }
1722}
1723
1724void
1725TableWalker::doL2Descriptor()
1726{
1727 if (currState->fault != NoFault) {
1728 return;
1729 }
1730
1731 currState->l2Desc.data = htog(currState->l2Desc.data,
1732 byteOrder(currState->tc));
1733
1734 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1735 currState->vaddr_tainted, currState->l2Desc.data);
1736 TlbEntry te;
1737
1621 return;
1622
1623 case LongDescriptor::Block:
1624 case LongDescriptor::Page:
1625 {
1626 auto fault_source = ArmFault::FaultSourceInvalid;
1627 // Check for address size fault
1628 if (checkAddrSizeFaultAArch64(
1629 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1630 currState->longDesc.offsetBits()),
1631 currState->physAddrRange)) {
1632
1633 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1634 currState->longDesc.lookupLevel);
1635 fault_source = ArmFault::AddressSizeLL;
1636
1637 // Check for access fault
1638 } else if (currState->longDesc.af() == 0) {
1639
1640 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1641 currState->longDesc.lookupLevel);
1642 fault_source = ArmFault::AccessFlagLL;
1643 }
1644
1645 if (fault_source != ArmFault::FaultSourceInvalid) {
1646 currState->fault = generateLongDescFault(fault_source);
1647 } else {
1648 insertTableEntry(currState->longDesc, true);
1649 }
1650 }
1651 return;
1652 case LongDescriptor::Table:
1653 {
1654 // Set hierarchical permission flags
1655 currState->secureLookup = currState->secureLookup &&
1656 currState->longDesc.secureTable();
1657 currState->rwTable = currState->rwTable &&
1658 (currState->longDesc.rwTable() || currState->hpd);
1659 currState->userTable = currState->userTable &&
1660 (currState->longDesc.userTable() || currState->hpd);
1661 currState->xnTable = currState->xnTable ||
1662 (currState->longDesc.xnTable() && !currState->hpd);
1663 currState->pxnTable = currState->pxnTable ||
1664 (currState->longDesc.pxnTable() && !currState->hpd);
1665
1666 // Set up next level lookup
1667 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1668 currState->vaddr);
1669
1670 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1671 currState->longDesc.lookupLevel,
1672 currState->longDesc.lookupLevel + 1,
1673 next_desc_addr,
1674 currState->secureLookup ? "s" : "ns");
1675
1676 // Check for address size fault
1677 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1678 next_desc_addr, currState->physAddrRange)) {
1679 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1680 currState->longDesc.lookupLevel);
1681
1682 currState->fault = generateLongDescFault(
1683 ArmFault::AddressSizeLL);
1684 return;
1685 }
1686
1687 // Trickbox address check
1688 currState->fault = testWalk(
1689 next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1690 toLookupLevel(currState->longDesc.lookupLevel +1));
1691
1692 if (currState->fault) {
1693 if (!currState->timing) {
1694 currState->tc = NULL;
1695 currState->req = NULL;
1696 }
1697 return;
1698 }
1699
1700 Request::Flags flag = Request::PT_WALK;
1701 if (currState->secureLookup)
1702 flag.set(Request::SECURE);
1703
1704 LookupLevel L = currState->longDesc.lookupLevel =
1705 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1706 Event *event = NULL;
1707 switch (L) {
1708 case L1:
1709 assert(currState->aarch64);
1710 case L2:
1711 case L3:
1712 event = LongDescEventByLevel[L];
1713 break;
1714 default:
1715 panic("Wrong lookup level in table walk\n");
1716 break;
1717 }
1718
1719 bool delayed;
1720 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1721 sizeof(uint64_t), flag, -1, event,
1722 &TableWalker::doLongDescriptor);
1723 if (delayed) {
1724 currState->delayed = true;
1725 }
1726 }
1727 return;
1728 default:
1729 panic("A new type in a 2 bit field?\n");
1730 }
1731}
1732
1733void
1734TableWalker::doL2Descriptor()
1735{
1736 if (currState->fault != NoFault) {
1737 return;
1738 }
1739
1740 currState->l2Desc.data = htog(currState->l2Desc.data,
1741 byteOrder(currState->tc));
1742
1743 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1744 currState->vaddr_tainted, currState->l2Desc.data);
1745 TlbEntry te;
1746
1747 const bool is_atomic = currState->req->isAtomic();
1748
1738 if (currState->l2Desc.invalid()) {
1739 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1740 if (!currState->timing) {
1741 currState->tc = NULL;
1742 currState->req = NULL;
1743 }
1744 if (currState->isFetch)
1745 currState->fault = std::make_shared<PrefetchAbort>(
1746 currState->vaddr_tainted,
1747 ArmFault::TranslationLL + L2,
1748 isStage2,
1749 ArmFault::VmsaTran);
1750 else
1751 currState->fault = std::make_shared<DataAbort>(
1752 currState->vaddr_tainted, currState->l1Desc.domain(),
1749 if (currState->l2Desc.invalid()) {
1750 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1751 if (!currState->timing) {
1752 currState->tc = NULL;
1753 currState->req = NULL;
1754 }
1755 if (currState->isFetch)
1756 currState->fault = std::make_shared<PrefetchAbort>(
1757 currState->vaddr_tainted,
1758 ArmFault::TranslationLL + L2,
1759 isStage2,
1760 ArmFault::VmsaTran);
1761 else
1762 currState->fault = std::make_shared<DataAbort>(
1763 currState->vaddr_tainted, currState->l1Desc.domain(),
1753 currState->isWrite, ArmFault::TranslationLL + L2,
1764 is_atomic ? false : currState->isWrite,
1765 ArmFault::TranslationLL + L2,
1754 isStage2,
1755 ArmFault::VmsaTran);
1756 return;
1757 }
1758
1759 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1760 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1761 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1762 */
1763 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1764 currState->sctlr.afe, currState->l2Desc.ap());
1765
1766 currState->fault = std::make_shared<DataAbort>(
1767 currState->vaddr_tainted,
1766 isStage2,
1767 ArmFault::VmsaTran);
1768 return;
1769 }
1770
1771 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1772 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1773 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1774 */
1775 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1776 currState->sctlr.afe, currState->l2Desc.ap());
1777
1778 currState->fault = std::make_shared<DataAbort>(
1779 currState->vaddr_tainted,
1768 TlbEntry::DomainType::NoAccess, currState->isWrite,
1780 TlbEntry::DomainType::NoAccess,
1781 is_atomic ? false : currState->isWrite,
1769 ArmFault::AccessFlagLL + L2, isStage2,
1770 ArmFault::VmsaTran);
1771 }
1772
1773 insertTableEntry(currState->l2Desc, false);
1774}
1775
void
TableWalker::doL1DescriptorWrapper()
{
    // Timing-mode completion handler for an L1 (short-descriptor) fetch.
    // Pops the walk state from the L1 queue, processes the descriptor and
    // either finishes the translation, reports a fault, or re-queues the
    // state for the dependent L2 fetch.
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Deliver the fault to the requester, release this walker slot and
        // kick off the next queued walk before destroying the state.
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        statWalksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        statWalkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        // Retry through the TLB: the entry inserted by doL1Descriptor will
        // now hit and the original request completes.
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    // currState is either destroyed or owned by a queue now.
    currState = NULL;
}
1831
void
TableWalker::doL2DescriptorWrapper()
{
    // Timing-mode completion handler for an L2 (short-descriptor) fetch —
    // the final level of a short-format walk, so the translation always
    // finishes here (with either a fault or a TLB retry).
    currState = stateQueues[L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        statWalksShortTerminatedAtLevel[1]++;
    } else {
        statWalkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        // Retry through the TLB; the newly inserted entry will hit.
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksShortTerminatedAtLevel[1]++;
    }


    // Release the walker slot and start the next queued walk, then tear
    // down this walk's state.
    stateQueues[L2].pop_front();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1872
void
TableWalker::doL0LongDescriptorWrapper()
{
    // Event callback: completion of a level-0 long-descriptor fetch.
    doLongDescriptorWrapper(L0);
}
1878
void
TableWalker::doL1LongDescriptorWrapper()
{
    // Event callback: completion of a level-1 long-descriptor fetch.
    doLongDescriptorWrapper(L1);
}
1884
void
TableWalker::doL2LongDescriptorWrapper()
{
    // Event callback: completion of a level-2 long-descriptor fetch.
    doLongDescriptorWrapper(L2);
}
1890
void
TableWalker::doL3LongDescriptorWrapper()
{
    // Event callback: completion of a level-3 long-descriptor fetch.
    doLongDescriptorWrapper(L3);
}
1896
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    // Timing-mode completion handler shared by all long-descriptor levels.
    // Pops the walk state from the per-level queue, processes the fetched
    // descriptor, and either finishes the walk, reports a fault, or
    // re-queues the state for the next level's fetch.
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        statWalkServiceTime.sample(curTick() - currState->startTime);
        // Retry through the TLB; the entry inserted by doLongDescriptor
        // will now hit and the original request completes.
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        // (doLongDescriptor already advanced longDesc.lookupLevel).
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    // currState is either destroyed or owned by a queue now.
    currState = NULL;
}
1951
1952
1953void
1954TableWalker::nextWalk(ThreadContext *tc)
1955{
1956 if (pendingQueue.size())
1957 schedule(doProcessEvent, clockEdge(Cycles(1)));
1958 else
1959 completeDrain();
1960}
1961
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
    Request::Flags flags, int queueIndex, Event *event,
    void (TableWalker::*doDescriptor)())
{
    // Fetch a page-table descriptor into *data, honouring the walk's mode:
    //  - timing: issue a DMA/stage-2 read, park currState on the queue at
    //    queueIndex (if >= 0) and rely on `event` to resume later;
    //  - atomic: read synchronously and invoke doDescriptor immediately;
    //  - functional: read via a functional packet, no timing side effects.
    // Returns true iff the access is timing (i.e. completion is deferred).
    bool isTiming = currState->timing;

    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
            descAddr, currState->stage2Req);

    // If this translation has a stage 2 then we know descAddr is an IPA and
    // needs to be translated before we can access the page table. Do that
    // check here.
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The stage-2 translation object owns the in-flight read; it is
            // freed by the wrapper when the walk resumes.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags);
            // NOTE(review): fault is sampled right after issuing the timed
            // read — presumably only set for immediately-detected faults.
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                // Ownership of the walk state transfers to the queue.
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Atomic/functional: data is already in place, process it now.
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(),flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then process immediately.
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: build a one-off packet and send it
            // functionally so no timing state is disturbed.
            RequestPtr req = std::make_shared<Request>(
                descAddr, numBytes, flags, masterId);

            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port->sendFunctional(pkt);
            (this->*doDescriptor)();
            delete pkt;
        }
    }
    return (isTiming);
}
2034
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    // Convert a completed leaf descriptor into a TlbEntry and insert it
    // into the TLB. `longDescriptor` selects LPAE/AArch64 attribute
    // handling (hierarchical permissions, HAP for stage 2) versus the
    // short-descriptor format.
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid          = true;
    te.longDescFormat = longDescriptor;
    te.isHyp          = currState->isHyp;
    te.asid           = currState->asid;
    te.vmid           = currState->vmid;
    te.N              = descriptor.offsetBits();
    te.vpn            = currState->vaddr >> te.N;
    te.size           = (1<<te.N) - 1;
    te.pfn            = descriptor.pfn();
    te.domain         = descriptor.domain();
    te.lookupLevel    = descriptor.lookupLevel;
    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid          = !currState->isSecure;
    te.xn             = descriptor.xn();
    if (currState->aarch64)
        te.el         = currState->el;
    else
        te.el         = EL1;

    statPageSizes[pageSizeNtoStatBin(te.N)]++;
    statRequestOrigin[COMPLETED][currState->isFetch]++;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global         = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Fold in the hierarchical (table-level) permission bits
        // accumulated while walking down the table.
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but its stored in the same bit
            // possitions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
           te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
               (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, lDescriptor);
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    if (!currState->timing) {
        // Non-timing walks are done with these references now.
        currState->tc = NULL;
        currState->req = NULL;
    }
}
2109
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    // SimObject factory hook: instantiate the walker from its Python-
    // generated parameter struct.
    return new ArmISA::TableWalker(this);
}
2115
2116LookupLevel
2117TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2118{
2119 switch (lookup_level_as_int) {
2120 case L1:
2121 return L1;
2122 case L2:
2123 return L2;
2124 case L3:
2125 return L3;
2126 default:
2127 panic("Invalid lookup level conversion");
2128 }
2129}
2130
2131/* this method keeps track of the table walker queue's residency, so
2132 * needs to be called whenever requests start and complete. */
2133void
2134TableWalker::pendingChange()
2135{
2136 unsigned n = pendingQueue.size();
2137 if ((currState != NULL) && (currState != pendingQueue.front())) {
2138 ++n;
2139 }
2140
2141 if (n != pendingReqs) {
2142 Tick now = curTick();
2143 statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2144 pendingReqs = n;
2145 pendingChangeTick = now;
2146 }
2147}
2148
Fault
TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
                      LookupLevel lookup_level)
{
    // Forward the trickbox (external memory-access checker) test to the
    // TLB, supplying this walk's virtual address, security state and mode.
    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
                         currState->mode, domain, lookup_level);
}
2156
2157
2158uint8_t
2159TableWalker::pageSizeNtoStatBin(uint8_t N)
2160{
2161 /* for statPageSizes */
2162 switch(N) {
2163 case 12: return 0; // 4K
2164 case 14: return 1; // 16K (using 16K granule in v8-64)
2165 case 16: return 2; // 64K
2166 case 20: return 3; // 1M
2167 case 21: return 4; // 2M-LPAE
2168 case 24: return 5; // 16M
2169 case 25: return 6; // 32M (using 16K granule in v8-64)
2170 case 29: return 7; // 512M (using 64K granule in v8-64)
2171 case 30: return 8; // 1G-LPAE
2172 default:
2173 panic("unknown page size");
2174 return 255;
2175 }
2176}
2177
void
TableWalker::regStats()
{
    // Register all of the walker's statistics with the stats framework.
    // Called once at startup; names/descriptions here define the stat
    // output schema, so they must stay stable.
    ClockedObject::regStats();

    statWalks
        .name(name() + ".walks")
        .desc("Table walker walks requested")
        ;

    statWalksShortDescriptor
        .name(name() + ".walksShort")
        .desc("Table walker walks initiated with short descriptors")
        .flags(Stats::nozero)
        ;

    statWalksLongDescriptor
        .name(name() + ".walksLong")
        .desc("Table walker walks initiated with long descriptors")
        .flags(Stats::nozero)
        ;

    // Termination-level vectors: short format has 2 levels, long has 4.
    statWalksShortTerminatedAtLevel
        .init(2)
        .name(name() + ".walksShortTerminationLevel")
        .desc("Level at which table walker walks "
              "with short descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksShortTerminatedAtLevel.subname(0, "Level1");
    statWalksShortTerminatedAtLevel.subname(1, "Level2");

    statWalksLongTerminatedAtLevel
        .init(4)
        .name(name() + ".walksLongTerminationLevel")
        .desc("Level at which table walker walks "
              "with long descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksLongTerminatedAtLevel.subname(0, "Level0");
    statWalksLongTerminatedAtLevel.subname(1, "Level1");
    statWalksLongTerminatedAtLevel.subname(2, "Level2");
    statWalksLongTerminatedAtLevel.subname(3, "Level3");

    statSquashedBefore
        .name(name() + ".walksSquashedBefore")
        .desc("Table walks squashed before starting")
        .flags(Stats::nozero)
        ;

    statSquashedAfter
        .name(name() + ".walksSquashedAfter")
        .desc("Table walks squashed after completion")
        .flags(Stats::nozero)
        ;

    // Latency distributions, sampled in ticks.
    statWalkWaitTime
        .init(16)
        .name(name() + ".walkWaitTime")
        .desc("Table walker wait (enqueue to first request) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    statWalkServiceTime
        .init(16)
        .name(name() + ".walkCompletionTime")
        .desc("Table walker service (enqueue to completion) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    statPendingWalks
        .init(16)
        .name(name() + ".walksPending")
        .desc("Table walker pending requests distribution")
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
        ;

    statPageSizes // see DDI 0487A D4-1661
        .init(9)
        .name(name() + ".walkPageSizes")
        .desc("Table walker page sizes translated")
        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
        ;
    // Bin order must match pageSizeNtoStatBin().
    statPageSizes.subname(0, "4K");
    statPageSizes.subname(1, "16K");
    statPageSizes.subname(2, "64K");
    statPageSizes.subname(3, "1M");
    statPageSizes.subname(4, "2M");
    statPageSizes.subname(5, "16M");
    statPageSizes.subname(6, "32M");
    statPageSizes.subname(7, "512M");
    statPageSizes.subname(8, "1G");

    statRequestOrigin
        .init(2,2) // Instruction/Data, requests/completed
        .name(name() + ".walkRequestOrigin")
        .desc("Table walker requests started/completed, data/inst")
        .flags(Stats::total)
        ;
    statRequestOrigin.subname(0,"Requested");
    statRequestOrigin.subname(1,"Completed");
    statRequestOrigin.ysubname(0,"Data");
    statRequestOrigin.ysubname(1,"Inst");
}
1782 ArmFault::AccessFlagLL + L2, isStage2,
1783 ArmFault::VmsaTran);
1784 }
1785
1786 insertTableEntry(currState->l2Desc, false);
1787}
1788
1789void
1790TableWalker::doL1DescriptorWrapper()
1791{
1792 currState = stateQueues[L1].front();
1793 currState->delayed = false;
1794 // if there's a stage2 translation object we don't need it any more
1795 if (currState->stage2Tran) {
1796 delete currState->stage2Tran;
1797 currState->stage2Tran = NULL;
1798 }
1799
1800
1801 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1802 DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1803
1804 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1805 doL1Descriptor();
1806
1807 stateQueues[L1].pop_front();
1808 // Check if fault was generated
1809 if (currState->fault != NoFault) {
1810 currState->transState->finish(currState->fault, currState->req,
1811 currState->tc, currState->mode);
1812 statWalksShortTerminatedAtLevel[0]++;
1813
1814 pending = false;
1815 nextWalk(currState->tc);
1816
1817 currState->req = NULL;
1818 currState->tc = NULL;
1819 currState->delayed = false;
1820 delete currState;
1821 }
1822 else if (!currState->delayed) {
1823 // delay is not set so there is no L2 to do
1824 // Don't finish the translation if a stage 2 look up is underway
1825 statWalkServiceTime.sample(curTick() - currState->startTime);
1826 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1827 tlb->translateTiming(currState->req, currState->tc,
1828 currState->transState, currState->mode);
1829 statWalksShortTerminatedAtLevel[0]++;
1830
1831 pending = false;
1832 nextWalk(currState->tc);
1833
1834 currState->req = NULL;
1835 currState->tc = NULL;
1836 currState->delayed = false;
1837 delete currState;
1838 } else {
1839 // need to do L2 descriptor
1840 stateQueues[L2].push_back(currState);
1841 }
1842 currState = NULL;
1843}
1844
1845void
1846TableWalker::doL2DescriptorWrapper()
1847{
1848 currState = stateQueues[L2].front();
1849 assert(currState->delayed);
1850 // if there's a stage2 translation object we don't need it any more
1851 if (currState->stage2Tran) {
1852 delete currState->stage2Tran;
1853 currState->stage2Tran = NULL;
1854 }
1855
1856 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1857 currState->vaddr_tainted);
1858 doL2Descriptor();
1859
1860 // Check if fault was generated
1861 if (currState->fault != NoFault) {
1862 currState->transState->finish(currState->fault, currState->req,
1863 currState->tc, currState->mode);
1864 statWalksShortTerminatedAtLevel[1]++;
1865 } else {
1866 statWalkServiceTime.sample(curTick() - currState->startTime);
1867 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1868 tlb->translateTiming(currState->req, currState->tc,
1869 currState->transState, currState->mode);
1870 statWalksShortTerminatedAtLevel[1]++;
1871 }
1872
1873
1874 stateQueues[L2].pop_front();
1875 pending = false;
1876 nextWalk(currState->tc);
1877
1878 currState->req = NULL;
1879 currState->tc = NULL;
1880 currState->delayed = false;
1881
1882 delete currState;
1883 currState = NULL;
1884}
1885
void
TableWalker::doL0LongDescriptorWrapper()
{
    // Event callback for a completed level-0 long-descriptor fetch.
    doLongDescriptorWrapper(L0);
}
1891
void
TableWalker::doL1LongDescriptorWrapper()
{
    // Event callback for a completed level-1 long-descriptor fetch.
    doLongDescriptorWrapper(L1);
}
1897
void
TableWalker::doL2LongDescriptorWrapper()
{
    // Event callback for a completed level-2 long-descriptor fetch.
    doLongDescriptorWrapper(L2);
}
1903
void
TableWalker::doL3LongDescriptorWrapper()
{
    // Event callback for a completed level-3 long-descriptor fetch.
    doLongDescriptorWrapper(L3);
}
1909
1910void
1911TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1912{
1913 currState = stateQueues[curr_lookup_level].front();
1914 assert(curr_lookup_level == currState->longDesc.lookupLevel);
1915 currState->delayed = false;
1916
1917 // if there's a stage2 translation object we don't need it any more
1918 if (currState->stage2Tran) {
1919 delete currState->stage2Tran;
1920 currState->stage2Tran = NULL;
1921 }
1922
1923 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1924 currState->vaddr_tainted);
1925 doLongDescriptor();
1926
1927 stateQueues[curr_lookup_level].pop_front();
1928
1929 if (currState->fault != NoFault) {
1930 // A fault was generated
1931 currState->transState->finish(currState->fault, currState->req,
1932 currState->tc, currState->mode);
1933
1934 pending = false;
1935 nextWalk(currState->tc);
1936
1937 currState->req = NULL;
1938 currState->tc = NULL;
1939 currState->delayed = false;
1940 delete currState;
1941 } else if (!currState->delayed) {
1942 // No additional lookups required
1943 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1944 statWalkServiceTime.sample(curTick() - currState->startTime);
1945 tlb->translateTiming(currState->req, currState->tc,
1946 currState->transState, currState->mode);
1947 statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1948
1949 pending = false;
1950 nextWalk(currState->tc);
1951
1952 currState->req = NULL;
1953 currState->tc = NULL;
1954 currState->delayed = false;
1955 delete currState;
1956 } else {
1957 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1958 panic("Max. number of lookups already reached in table walk\n");
1959 // Need to perform additional lookups
1960 stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1961 }
1962 currState = NULL;
1963}
1964
1965
1966void
1967TableWalker::nextWalk(ThreadContext *tc)
1968{
1969 if (pendingQueue.size())
1970 schedule(doProcessEvent, clockEdge(Cycles(1)));
1971 else
1972 completeDrain();
1973}
1974
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
    Request::Flags flags, int queueIndex, Event *event,
    void (TableWalker::*doDescriptor)())
{
    // Issue the memory read for a translation-table descriptor.
    // Returns true when the walk is timing-mode (the read completes later
    // and 'event' fires); returns false when the descriptor is already in
    // 'data' and doDescriptor() has been invoked synchronously.
    bool isTiming = currState->timing;

    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
            descAddr, currState->stage2Req);

    // If this translation has a stage 2 then we know descAddr is an IPA and
    // needs to be translated before we can access the page table. Do that
    // check here.
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The helper object carries the destination buffer and the
            // completion event through the stage 2 translation; it is
            // freed later by the descriptor-wrapper callbacks.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            // Park the walk state on the per-level queue; ownership of
            // currState passes to the queue until the event fires.
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Atomic/functional: the data is available now, so decode the
            // descriptor immediately (even if a fault was recorded above;
            // the callee checks currState->fault).
            (this->*doDescriptor)();
        }
    } else {
        // No stage 2 translation required: descAddr is a physical address.
        if (isTiming) {
            // Timing mode: non-blocking DMA read, completion via 'event'.
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(),flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then decode in place.
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: bypass timing entirely with a functional
            // packet sent on the walker's port.
            RequestPtr req = std::make_shared<Request>(
                descAddr, numBytes, flags, masterId);

            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port->sendFunctional(pkt);
            (this->*doDescriptor)();
            delete pkt;
        }
    }
    return (isTiming);
}
2047
2048void
2049TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2050{
2051 TlbEntry te;
2052
2053 // Create and fill a new page table entry
2054 te.valid = true;
2055 te.longDescFormat = longDescriptor;
2056 te.isHyp = currState->isHyp;
2057 te.asid = currState->asid;
2058 te.vmid = currState->vmid;
2059 te.N = descriptor.offsetBits();
2060 te.vpn = currState->vaddr >> te.N;
2061 te.size = (1<<te.N) - 1;
2062 te.pfn = descriptor.pfn();
2063 te.domain = descriptor.domain();
2064 te.lookupLevel = descriptor.lookupLevel;
2065 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
2066 te.nstid = !currState->isSecure;
2067 te.xn = descriptor.xn();
2068 if (currState->aarch64)
2069 te.el = currState->el;
2070 else
2071 te.el = EL1;
2072
2073 statPageSizes[pageSizeNtoStatBin(te.N)]++;
2074 statRequestOrigin[COMPLETED][currState->isFetch]++;
2075
2076 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2077 // as global
2078 te.global = descriptor.global(currState) || isStage2;
2079 if (longDescriptor) {
2080 LongDescriptor lDescriptor =
2081 dynamic_cast<LongDescriptor &>(descriptor);
2082
2083 te.xn |= currState->xnTable;
2084 te.pxn = currState->pxnTable || lDescriptor.pxn();
2085 if (isStage2) {
2086 // this is actually the HAP field, but its stored in the same bit
2087 // possitions as the AP field in a stage 1 translation.
2088 te.hap = lDescriptor.ap();
2089 } else {
2090 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2091 (currState->userTable && (descriptor.ap() & 0x1));
2092 }
2093 if (currState->aarch64)
2094 memAttrsAArch64(currState->tc, te, lDescriptor);
2095 else
2096 memAttrsLPAE(currState->tc, te, lDescriptor);
2097 } else {
2098 te.ap = descriptor.ap();
2099 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2100 descriptor.shareable());
2101 }
2102
2103 // Debug output
2104 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2105 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2106 te.N, te.pfn, te.size, te.global, te.valid);
2107 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2108 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2109 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2110 te.nonCacheable, te.ns);
2111 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2112 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2113 descriptor.getRawData());
2114
2115 // Insert the entry into the TLB
2116 tlb->insert(currState->vaddr, te);
2117 if (!currState->timing) {
2118 currState->tc = NULL;
2119 currState->req = NULL;
2120 }
2121}
2122
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    // Factory hook used by the Python configuration system to build the
    // C++ walker object from its parameter struct.
    return new ArmISA::TableWalker(this);
}
2128
2129LookupLevel
2130TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2131{
2132 switch (lookup_level_as_int) {
2133 case L1:
2134 return L1;
2135 case L2:
2136 return L2;
2137 case L3:
2138 return L3;
2139 default:
2140 panic("Invalid lookup level conversion");
2141 }
2142}
2143
2144/* this method keeps track of the table walker queue's residency, so
2145 * needs to be called whenever requests start and complete. */
2146void
2147TableWalker::pendingChange()
2148{
2149 unsigned n = pendingQueue.size();
2150 if ((currState != NULL) && (currState != pendingQueue.front())) {
2151 ++n;
2152 }
2153
2154 if (n != pendingReqs) {
2155 Tick now = curTick();
2156 statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2157 pendingReqs = n;
2158 pendingChangeTick = now;
2159 }
2160}
2161
Fault
TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
                      LookupLevel lookup_level)
{
    // Forward a table-walk access to the TLB's test interface (memory
    // checker hook), supplying the current walk's VA, security state and
    // access mode alongside the PA being touched.
    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
                         currState->mode, domain, lookup_level);
}
2169
2170
2171uint8_t
2172TableWalker::pageSizeNtoStatBin(uint8_t N)
2173{
2174 /* for statPageSizes */
2175 switch(N) {
2176 case 12: return 0; // 4K
2177 case 14: return 1; // 16K (using 16K granule in v8-64)
2178 case 16: return 2; // 64K
2179 case 20: return 3; // 1M
2180 case 21: return 4; // 2M-LPAE
2181 case 24: return 5; // 16M
2182 case 25: return 6; // 32M (using 16K granule in v8-64)
2183 case 29: return 7; // 512M (using 64K granule in v8-64)
2184 case 30: return 8; // 1G-LPAE
2185 default:
2186 panic("unknown page size");
2187 return 255;
2188 }
2189}
2190
void
TableWalker::regStats()
{
    // Register all table-walker statistics with the stats framework.
    // Called once at startup; stat names are relative to name().
    ClockedObject::regStats();

    statWalks
        .name(name() + ".walks")
        .desc("Table walker walks requested")
        ;

    statWalksShortDescriptor
        .name(name() + ".walksShort")
        .desc("Table walker walks initiated with short descriptors")
        .flags(Stats::nozero)
        ;

    statWalksLongDescriptor
        .name(name() + ".walksLong")
        .desc("Table walker walks initiated with long descriptors")
        .flags(Stats::nozero)
        ;

    // Per-level termination counts: short format has two levels (L1/L2).
    statWalksShortTerminatedAtLevel
        .init(2)
        .name(name() + ".walksShortTerminationLevel")
        .desc("Level at which table walker walks "
              "with short descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksShortTerminatedAtLevel.subname(0, "Level1");
    statWalksShortTerminatedAtLevel.subname(1, "Level2");

    // Long format has four possible levels (L0-L3).
    statWalksLongTerminatedAtLevel
        .init(4)
        .name(name() + ".walksLongTerminationLevel")
        .desc("Level at which table walker walks "
              "with long descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksLongTerminatedAtLevel.subname(0, "Level0");
    statWalksLongTerminatedAtLevel.subname(1, "Level1");
    statWalksLongTerminatedAtLevel.subname(2, "Level2");
    statWalksLongTerminatedAtLevel.subname(3, "Level3");

    statSquashedBefore
        .name(name() + ".walksSquashedBefore")
        .desc("Table walks squashed before starting")
        .flags(Stats::nozero)
        ;

    statSquashedAfter
        .name(name() + ".walksSquashedAfter")
        .desc("Table walks squashed after completion")
        .flags(Stats::nozero)
        ;

    // Latency distributions, sampled in ticks.
    statWalkWaitTime
        .init(16)
        .name(name() + ".walkWaitTime")
        .desc("Table walker wait (enqueue to first request) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    statWalkServiceTime
        .init(16)
        .name(name() + ".walkCompletionTime")
        .desc("Table walker service (enqueue to completion) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    // Distribution of in-flight walk counts, sampled by pendingChange().
    statPendingWalks
        .init(16)
        .name(name() + ".walksPending")
        .desc("Table walker pending requests distribution")
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
        ;

    // Bin layout must match pageSizeNtoStatBin().
    statPageSizes // see DDI 0487A D4-1661
        .init(9)
        .name(name() + ".walkPageSizes")
        .desc("Table walker page sizes translated")
        .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
        ;
    statPageSizes.subname(0, "4K");
    statPageSizes.subname(1, "16K");
    statPageSizes.subname(2, "64K");
    statPageSizes.subname(3, "1M");
    statPageSizes.subname(4, "2M");
    statPageSizes.subname(5, "16M");
    statPageSizes.subname(6, "32M");
    statPageSizes.subname(7, "512M");
    statPageSizes.subname(8, "1G");

    // 2-D: x = requested/completed, y = data/inst origin.
    statRequestOrigin
        .init(2,2) // Instruction/Data, requests/completed
        .name(name() + ".walkRequestOrigin")
        .desc("Table walker requests started/completed, data/inst")
        .flags(Stats::total)
        ;
    statRequestOrigin.subname(0,"Requested");
    statRequestOrigin.subname(1,"Completed");
    statRequestOrigin.ysubname(0,"Data");
    statRequestOrigin.ysubname(1,"Inst");
}