old (12663:565c16ffe1d1) -> new (12717:2e2c211644d2)
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

--- 59 unchanged lines hidden ---

    assoc = p->assoc;
    assert(assoc <= size);
    numSets = size/assoc;
    allocationPolicy = p->allocationPolicy;
    hasMemSidePort = false;
    accessDistance = p->accessDistance;
    clock = p->clk_domain->clockPeriod();

    tlb.assign(size, GpuTlbEntry());

    freeList.resize(numSets);
    entryList.resize(numSets);

    for (int set = 0; set < numSets; ++set) {
        for (int way = 0; way < assoc; ++way) {
            int x = set * assoc + way;
            freeList[set].push_back(&tlb.at(x));
--- 76 unchanged lines hidden ---
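Reviewer note: a minimal standalone sketch of the set/way indexing used in the constructor above. The flat tlb vector has numSets * assoc entries, and entry (set, way) lives at index set * assoc + way. The Entry struct and the geometry below are hypothetical stand-ins for GpuTlbEntry and the real parameters.

    #include <cassert>
    #include <list>
    #include <vector>

    struct Entry { bool valid = false; };   // hypothetical stand-in for GpuTlbEntry

    int main()
    {
        const int size = 64, assoc = 8;     // hypothetical TLB geometry
        const int numSets = size / assoc;

        std::vector<Entry> tlb(size);
        std::vector<std::list<Entry*>> freeList(numSets);

        // Every way of every set starts out on that set's free list.
        for (int set = 0; set < numSets; ++set)
            for (int way = 0; way < assoc; ++way)
                freeList[set].push_back(&tlb.at(set * assoc + way));

        assert((int)freeList[0].size() == assoc);
        return 0;
    }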

        hasMemSidePort = true;

        return *memSidePort[idx];
    } else {
        panic("GpuTLB::getMasterPort: unknown port %s\n", if_name);
    }
}

GpuTlbEntry*
GpuTLB::insert(Addr vpn, GpuTlbEntry &entry)
{
    GpuTlbEntry *newEntry = nullptr;

    /**
     * vpn holds the virtual page address.
     * The least significant bits are simply masked.
     */
    int set = (vpn >> TheISA::PageShift) & setMask;

    if (!freeList[set].empty()) {

--- 36 unchanged lines hidden ---


            break;
        }
    }

    return entry;
}

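To make the set selection in insert() concrete: shift away the page offset, then mask with numSets - 1 (the value setMask holds, assuming numSets is a power of two). A standalone sketch with illustrative values:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int PageShift = 12;               // 4KB pages, as on x86
        const int numSets = 8;                  // hypothetical; must be a power of two
        const std::uint64_t setMask = numSets - 1;

        std::uint64_t vaddr = 0x7fff00035000;   // example virtual address
        int set = (vaddr >> PageShift) & setMask;

        std::printf("vaddr %#llx -> set %d\n",
                    (unsigned long long)vaddr, set);   // prints set 5
        return 0;
    }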
GpuTlbEntry*
GpuTLB::lookup(Addr va, bool update_lru)
{
    int set = (va >> TheISA::PageShift) & setMask;

    auto entry = lookupIt(va, update_lru);

    if (entry == entryList[set].end())
        return nullptr;
    else
        return *entry;
}

void
GpuTLB::invalidateAll()
{
    DPRINTF(GPUTLB, "Invalidating all entries.\n");

    for (int i = 0; i < numSets; ++i) {
        while (!entryList[i].empty()) {
            GpuTlbEntry *entry = entryList[i].front();
            entryList[i].pop_front();
            freeList[i].push_back(entry);
        }
    }
}

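A small standalone sketch of the recycling pattern in invalidateAll(): entries are never destroyed, only moved from the per-set entry list back to the per-set free list, so a subsequent insert() can reuse them. Types and values below are hypothetical:

    #include <cassert>
    #include <list>

    struct Entry { bool valid = false; };

    int main()
    {
        std::list<Entry*> entryList, freeList;   // one set's lists
        Entry a, b;
        entryList.push_back(&a);
        entryList.push_back(&b);

        // Same recycling loop as invalidateAll, for a single set.
        while (!entryList.empty()) {
            Entry *e = entryList.front();
            entryList.pop_front();
            freeList.push_back(e);
        }

        assert(entryList.empty() && freeList.size() == 2);
        return 0;
    }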
void
GpuTLB::setConfigAddress(uint32_t addr)

--- 425 unchanged lines hidden ---

        DPRINTF(GPUTLB, "In protected mode.\n");
        // make sure we are in 64-bit mode
        assert(m5Reg.mode == LongMode);

        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(GPUTLB, "Paging enabled.\n");
            // update LRU stack on a hit
            GpuTlbEntry *entry = lookup(vaddr, true);

            if (entry)
                tlb_hit = true;

            if (!update_stats) {
                // functional TLB access for memory initialization
                // i.e., memory seeding or instr. seeding -> don't update
                // TLB and stats

--- 91 unchanged lines hidden ---

                return std::make_shared<GeneralProtection>(0);
            }
        }

        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(GPUTLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            GpuTlbEntry *entry = lookup(vaddr);
            localNumTLBAccesses++;

            if (!entry) {
                localNumTLBMisses++;
                if (timing) {
                    latency = missLatency1;
                }


--- 21 unchanged lines hidden ---

                                             mode, true,
                                             false);
                } else {
                    Addr alignedVaddr = p->pTable->pageAlign(vaddr);

                    DPRINTF(GPUTLB, "Mapping %#x to %#x\n",
                            alignedVaddr, pte->paddr);

                    GpuTlbEntry gpuEntry(
                        p->pTable->pid(), alignedVaddr,
                        pte->paddr, true);
                    entry = insert(alignedVaddr, gpuEntry);
                }

                DPRINTF(GPUTLB, "Miss was serviced.\n");
            }
        } else {
            localNumTLBHits++;


--- 229 unchanged lines hidden ---
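A condensed standalone sketch of the miss path above, with std::map standing in for both the TLB and the process page table (p->pTable): on a TLB miss the translation is fetched from the page table and cached via insert(); a missing PTE would be a page fault. All names below are illustrative, not gem5 API:

    #include <cstdint>
    #include <map>
    #include <optional>

    using Addr = std::uint64_t;
    struct Entry { Addr vaddr, paddr; bool valid; };

    std::map<Addr, Entry> tlbStore;     // stand-in for the set-associative TLB
    std::map<Addr, Addr> pageTable;     // stand-in for p->pTable: aligned vaddr -> paddr

    Addr pageAlign(Addr va) { return va & ~Addr(0xfff); }

    std::optional<Entry> translate(Addr vaddr)
    {
        Addr aligned = pageAlign(vaddr);
        auto hit = tlbStore.find(aligned);
        if (hit != tlbStore.end())
            return hit->second;                 // TLB hit

        auto pte = pageTable.find(aligned);
        if (pte == pageTable.end())
            return std::nullopt;                // would be a page fault

        // Miss serviced: cache the translation, as insert() does above.
        Entry e{aligned, pte->second, true};
        tlbStore[aligned] = e;
        return e;
    }

    int main()
    {
        pageTable[0x1000] = 0x20000;
        return translate(0x1234) ? 0 : 1;       // hits the PTE for page 0x1000
    }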

    RequestPtr tmp_req = pkt->req;

    // Access the TLB and figure out if it's a hit or a miss.
    bool success = tlbLookup(tmp_req, tmp_tc, update_stats);

    if (success) {
        lookup_outcome = TLB_HIT;
        // Put the entry in SenderState
        GpuTlbEntry *entry = lookup(tmp_req->getVaddr(), false);
        assert(entry);

        sender_state->tlbEntry =
            new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);

        if (update_stats) {
            // the reqCnt has an entry per level, so its size tells us
            // which level we are in
            sender_state->hitLevel = sender_state->reqCnt.size();
            globalNumTLBHits += req_cnt;
        }
    } else {

--- 35 unchanged lines hidden ---
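To illustrate the hitLevel bookkeeping above: each TLB level that handles the request pushes a count onto reqCnt, so the vector's size at hit time equals the level at which the request hit. A minimal sketch (the harness is hypothetical; only the reqCnt/hitLevel idea mirrors TranslationState):

    #include <cassert>
    #include <vector>

    struct TranslationState { std::vector<int> reqCnt; int hitLevel = -1; };

    int main()
    {
        TranslationState st;
        st.reqCnt.push_back(1);   // the L1 TLB saw the request
        st.reqCnt.push_back(1);   // the L2 TLB saw it too

        // On a hit, the number of levels traversed so far is the hit level.
        st.hitLevel = st.reqCnt.size();
        assert(st.hitLevel == 2);
        return 0;
    }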

}

/**
 * Do paging protection checks. If a page fault is encountered,
 * an assertion fires.
 */
void
GpuTLB::pagingProtectionChecks(ThreadContext *tc, PacketPtr pkt,
                               GpuTlbEntry *tlb_entry, Mode mode)
{
    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);
    uint32_t flags = pkt->req->getFlags();
    bool storeCheck = flags & (StoreCheck << FlagShift);

    // Do paging protection checks.
    bool inUser = (m5Reg.cpl == 3 && !(flags & (CPL0FlagBit << FlagShift)));
    CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);

--- 29 unchanged lines hidden ---
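The privilege check above packs request attributes into one flags word. A standalone sketch of the same bit tests, with hypothetical values for FlagShift, CPL0FlagBit, and StoreCheck (the real constants live in the x86 ISA headers):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Hypothetical values; the real constants live in the x86 ISA code.
        const int FlagShift = 4;
        const std::uint32_t CPL0FlagBit = 1, StoreCheck = 2;

        std::uint32_t flags = StoreCheck << FlagShift;   // a store-check request
        int cpl = 3;                                     // user-mode privilege level

        bool storeCheck = flags & (StoreCheck << FlagShift);
        // User access: CPL 3 and the CPL0-override flag not set.
        bool inUser = (cpl == 3 && !(flags & (CPL0FlagBit << FlagShift)));

        assert(storeCheck && inUser);
        return 0;
    }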

    Addr vaddr = pkt->req->getVaddr();

    TranslationState *sender_state =
        safe_cast<TranslationState*>(pkt->senderState);

    ThreadContext *tc = sender_state->tc;
    Mode mode = sender_state->tlbMode;

    GpuTlbEntry *local_entry, *new_entry;

    if (tlb_outcome == TLB_HIT) {
        DPRINTF(GPUTLB, "Translation Done - TLB Hit for addr %#x\n", vaddr);
        local_entry = sender_state->tlbEntry;
    } else {
        DPRINTF(GPUTLB, "Translation Done - TLB Miss for addr %#x\n",
                vaddr);


--- 150 unchanged lines hidden ---

            pte = p->pTable->lookup(vaddr);
        }

        if (pte) {
            DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                    pte->paddr);

            sender_state->tlbEntry =
                new GpuTlbEntry(0, virtPageAddr, pte->paddr, true);
        } else {
            sender_state->tlbEntry =
                new GpuTlbEntry(0, 0, 0, false);
        }

        handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);
    } else if (outcome == MISS_RETURN) {
        /**
         * We add an extra cycle in the return path of the translation
         * requests in between the various TLB levels.
         */
        handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);

--- 60 unchanged lines hidden ---

{
    TranslationState *sender_state =
        safe_cast<TranslationState*>(pkt->senderState);

    ThreadContext *tc = sender_state->tc;
    Mode mode = sender_state->tlbMode;
    Addr vaddr = pkt->req->getVaddr();

    GpuTlbEntry *local_entry, *new_entry;

    if (tlb_outcome == TLB_HIT) {
        DPRINTF(GPUTLB, "Functional Translation Done - TLB hit for addr "
                "%#x\n", vaddr);

        local_entry = sender_state->tlbEntry;
    } else {
        DPRINTF(GPUTLB, "Functional Translation Done - TLB miss for addr "

--- 17 unchanged lines hidden ---


        assert(local_entry);
    }

    DPRINTF(GPUTLB, "Entry found with vaddr %#x, doing protection checks "
            "while paddr was %#x.\n", local_entry->vaddr,
            local_entry->paddr);

    // Do paging checks if it's a normal functional access. If it's for a
    // prefetch, then sometimes you can try to prefetch something that won't
    // pass protection. We don't actually want to fault because there is no
    // demand access to deem this a violation. Just put it in the TLB and
    // it will fault if indeed a future demand access touches it in
    // violation.
    if (!sender_state->prefetch && sender_state->tlbEntry->valid)
        pagingProtectionChecks(tc, pkt, local_entry, mode);

    int page_size = local_entry->size();
    Addr paddr = local_entry->paddr | (vaddr & (page_size - 1));
    DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);

    pkt->req->setPaddr(paddr);

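The physical address above is formed by OR-ing the page frame base with the in-page offset. A worked standalone example, assuming a 4KB page: with frame base 0x20000 and vaddr 0x1234 the offset is 0x234, giving 0x20234.

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const std::uint64_t page_size = 4096;   // assuming a 4KB page
        std::uint64_t entry_paddr = 0x20000;    // physical page frame base
        std::uint64_t vaddr = 0x1234;

        // Keep the low page-offset bits of vaddr and splice them onto the frame.
        std::uint64_t paddr = entry_paddr | (vaddr & (page_size - 1));
        assert(paddr == 0x20234);
        return 0;
    }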

--- 66 unchanged lines hidden ---

                // no PageFaults are permitted after
                // the second page table lookup
                assert(pte);

                DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                        pte->paddr);

                sender_state->tlbEntry =
                    new GpuTlbEntry(0, virt_page_addr,
                                    pte->paddr, true);
            } else {
                // If this was a prefetch, then do the normal thing if the
                // translation succeeded. Otherwise, send back an empty TLB
                // entry so the caller can detect the failure and handle it
                // accordingly.
                if (pte) {
                    DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                            pte->paddr);

                    sender_state->tlbEntry =
                        new GpuTlbEntry(0, virt_page_addr,
                                        pte->paddr, true);
                } else {
                    DPRINTF(GPUPrefetch, "Prefetch failed %#x\n",
                            alignedVaddr);

                    sender_state->tlbEntry = new GpuTlbEntry();

                    return;
                }
            }
        }
    } else {
        DPRINTF(GPUPrefetch, "Functional Hit for vaddr %#x\n",
                tlb->lookup(pkt->req->getVaddr()));

        GpuTlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
                                         update_stats);

        assert(entry);

        sender_state->tlbEntry =
            new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);
    }
    // This is the function that would populate pkt->req with the paddr of
    // the translation. But if no translation happens (i.e., the prefetch
    // fails), the early returns in the above code will keep this function
    // from executing.
    tlb->handleFuncTranslationReturn(pkt, tlb_outcome);
}


--- 208 unchanged lines hidden ---