76c76
< tlb.assign(size, GpuTlbEntry());
---
> tlb.assign(size, TlbEntry());
169,170c169,170
< GpuTlbEntry*
< GpuTLB::insert(Addr vpn, GpuTlbEntry &entry)
---
> TlbEntry*
> GpuTLB::insert(Addr vpn, TlbEntry &entry)
172c172
< GpuTlbEntry *newEntry = nullptr;
---
> TlbEntry *newEntry = nullptr;
225c225
< GpuTlbEntry*
---
> TlbEntry*
245c245
< GpuTlbEntry *entry = entryList[i].front();
---
> TlbEntry *entry = entryList[i].front();
687c687
< GpuTlbEntry *entry = lookup(vaddr, true);
---
> TlbEntry *entry = lookup(vaddr, true);
795c795
< GpuTlbEntry *entry = lookup(vaddr);
---
> TlbEntry *entry = lookup(vaddr);
833,835c833,834
< GpuTlbEntry gpuEntry(
< p->pTable->pid(), alignedVaddr,
< pte->paddr, true);
---
> TlbEntry gpuEntry(p->pid(), alignedVaddr,
> pte->paddr, false, false);
1081c1080
< GpuTlbEntry *entry = lookup(tmp_req->getVaddr(), false);
---
> TlbEntry *entry = lookup(tmp_req->getVaddr(), false);
1083a1083
> auto p = sender_state->tc->getProcessPtr();
1085c1085,1086
< new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);
---
> new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
> false, false);
1137c1138
< GpuTlbEntry * tlb_entry, Mode mode)
---
> TlbEntry * tlb_entry, Mode mode)
1183c1184
< GpuTlbEntry *local_entry, *new_entry;
---
> TlbEntry *local_entry, *new_entry;
1350c1351,1352
< new GpuTlbEntry(0, virtPageAddr, pte->paddr, true);
---
> new TlbEntry(p->pid(), virtPageAddr, pte->paddr, false,
> false);
1352,1353c1354
< sender_state->tlbEntry =
< new GpuTlbEntry(0, 0, 0, false);
---
> sender_state->tlbEntry = nullptr;
1430c1431
< GpuTlbEntry *local_entry, *new_entry;
---
> TlbEntry *local_entry, *new_entry;
1464,1470c1465,1476
< // Do paging checks if it's a normal functional access. If it's for a
< // prefetch, then sometimes you can try to prefetch something that won't
< // pass protection. We don't actually want to fault becuase there is no
< // demand access to deem this a violation. Just put it in the TLB and
< // it will fault if indeed a future demand access touches it in
< // violation.
< if (!sender_state->prefetch && sender_state->tlbEntry->valid)
---
> /**
> * Do paging checks if it's a normal functional access. If it's for a
> * prefetch, then sometimes you can try to prefetch something that
> * won't pass protection. We don't actually want to fault because there
> * is no demand access to deem this a violation. Just put it in the
> * TLB and it will fault if indeed a future demand access touches it in
> * violation.
> *
> * This feature could be used to explore security issues around
> * speculative memory accesses.
> */
> if (!sender_state->prefetch && sender_state->tlbEntry)
1553,1554c1559,1560
< new GpuTlbEntry(0, virt_page_addr,
< pte->paddr, true);
---
> new TlbEntry(p->pid(), virt_page_addr,
> pte->paddr, false, false);
1565,1566c1571,1572
< new GpuTlbEntry(0, virt_page_addr,
< pte->paddr, true);
---
> new TlbEntry(p->pid(), virt_page_addr,
> pte->paddr, false, false);
1571c1577
< sender_state->tlbEntry = new GpuTlbEntry();
---
> sender_state->tlbEntry = nullptr;
1581c1587
< GpuTlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
---
> TlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
1585a1592
> auto p = sender_state->tc->getProcessPtr();
1587c1594,1595
< new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);
---
> new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
> false, false);