721c721,724
< DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
---
> DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
> // we should only ever see one response per cycle since we only
> // issue a new request once this response is sunk
> assert(!tickEvent.scheduled());
723c726
< Tick next_tick = cpu->clockEdge();
---
> tickEvent.schedule(pkt, cpu->clockEdge());
725,729d727
< if (next_tick == curTick())
< cpu->completeIfetch(pkt);
< else
< tickEvent.schedule(pkt, next_tick);
<
839,840c837
< // delay processing of returned data until next CPU clock edge
< Tick next_tick = cpu->clockEdge();
---
> DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());
842,843c839,844
< if (next_tick == curTick()) {
< cpu->completeDataAccess(pkt);
---
> // The timing CPU is not really ticked, instead it relies on the
> // memory system (fetch and load/store) to set the pace.
> if (!tickEvent.scheduled()) {
> // Delay processing of returned data until next CPU clock edge
> tickEvent.schedule(pkt, cpu->clockEdge());
> return true;
845,854c846,851
< if (!tickEvent.scheduled()) {
< tickEvent.schedule(pkt, next_tick);
< } else {
< // In the case of a split transaction and a cache that is
< // faster than a CPU we could get two responses before
< // next_tick expires
< if (!retryEvent.scheduled())
< cpu->schedule(retryEvent, next_tick);
< return false;
< }
---
> // In the case of a split transaction and a cache that is
> // faster than a CPU we could get two responses in the
> // same tick; delay the second one
> if (!retryEvent.scheduled())
> cpu->schedule(retryEvent, cpu->clockEdge(Cycles(1)));
> return false;
856,857d852
<
< return true;