1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Steve Reinhardt 29 */ 30 31#include "arch/locked_mem.hh" 32#include "arch/mmaped_ipr.hh" 33#include "arch/utility.hh" 34#include "base/bigint.hh" 35#include "cpu/exetrace.hh" 36#include "cpu/simple/timing.hh" 37#include "mem/packet.hh" 38#include "mem/packet_access.hh" 39#include "params/TimingSimpleCPU.hh" 40#include "sim/system.hh" 41 42using namespace std; 43using namespace TheISA; 44 45Port * 46TimingSimpleCPU::getPort(const std::string &if_name, int idx) 47{ 48 if (if_name == "dcache_port") 49 return &dcachePort; 50 else if (if_name == "icache_port") 51 return &icachePort; 52 else 53 panic("No Such Port\n"); 54} 55 56void 57TimingSimpleCPU::init() 58{ 59 BaseCPU::init(); 60#if FULL_SYSTEM 61 for (int i = 0; i < threadContexts.size(); ++i) { 62 ThreadContext *tc = threadContexts[i]; 63 64 // initialize CPU, including PC 65 TheISA::initCPU(tc, _cpuId); 66 } 67#endif 68} 69 70Tick 71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 72{ 73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!"); 74 return curTick; 75} 76 77void 78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 79{ 80 //No internal storage to update, jusst return 81 return; 82} 83 84void 85TimingSimpleCPU::CpuPort::recvStatusChange(Status status) 86{ 87 if (status == RangeChange) { 88 if (!snoopRangeSent) { 89 snoopRangeSent = true; 90 sendStatusChange(Port::RangeChange); 91 } 92 return; 93 } 94 95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!"); 96} 97 98 99void 100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 101{ 102 pkt = _pkt; 103 cpu->schedule(this, t); 104} 105 106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this) 108{ 109 _status = Idle; 110 111 icachePort.snoopRangeSent = false; 112 dcachePort.snoopRangeSent = false; 113 114 ifetch_pkt = dcache_pkt = NULL; 115 drainEvent = NULL; 116 previousTick = 0; 117 
changeState(SimObject::Running); 118} 119 120 121TimingSimpleCPU::~TimingSimpleCPU() 122{ 123} 124 125void 126TimingSimpleCPU::serialize(ostream &os) 127{ 128 SimObject::State so_state = SimObject::getState(); 129 SERIALIZE_ENUM(so_state); 130 BaseSimpleCPU::serialize(os); 131} 132 133void 134TimingSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 135{ 136 SimObject::State so_state; 137 UNSERIALIZE_ENUM(so_state); 138 BaseSimpleCPU::unserialize(cp, section); 139} 140 141unsigned int 142TimingSimpleCPU::drain(Event *drain_event) 143{ 144 // TimingSimpleCPU is ready to drain if it's not waiting for 145 // an access to complete. 146 if (_status == Idle || _status == Running || _status == SwitchedOut) { 147 changeState(SimObject::Drained); 148 return 0; 149 } else { 150 changeState(SimObject::Draining); 151 drainEvent = drain_event; 152 return 1; 153 } 154} 155 156void 157TimingSimpleCPU::resume() 158{ 159 DPRINTF(SimpleCPU, "Resume\n"); 160 if (_status != SwitchedOut && _status != Idle) { 161 assert(system->getMemoryMode() == Enums::timing); 162 163 if (fetchEvent.scheduled()) 164 deschedule(fetchEvent); 165 166 schedule(fetchEvent, nextCycle()); 167 } 168 169 changeState(SimObject::Running); 170} 171 172void 173TimingSimpleCPU::switchOut() 174{ 175 assert(_status == Running || _status == Idle); 176 _status = SwitchedOut; 177 numCycles += tickToCycles(curTick - previousTick); 178 179 // If we've been scheduled to resume but are then told to switch out, 180 // we'll need to cancel it. 181 if (fetchEvent.scheduled()) 182 deschedule(fetchEvent); 183} 184 185 186void 187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 188{ 189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 190 191 // if any of this CPU's ThreadContexts are active, mark the CPU as 192 // running and schedule its tick event. 
193 for (int i = 0; i < threadContexts.size(); ++i) { 194 ThreadContext *tc = threadContexts[i]; 195 if (tc->status() == ThreadContext::Active && _status != Running) { 196 _status = Running; 197 break; 198 } 199 } 200 201 if (_status != Running) { 202 _status = Idle; 203 } 204 assert(threadContexts.size() == 1); 205 previousTick = curTick; 206} 207 208 209void 210TimingSimpleCPU::activateContext(int thread_num, int delay) 211{ 212 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 213 214 assert(thread_num == 0); 215 assert(thread); 216 217 assert(_status == Idle); 218 219 notIdleFraction++; 220 _status = Running; 221 222 // kick things off by initiating the fetch of the next instruction 223 schedule(fetchEvent, nextCycle(curTick + ticks(delay))); 224} 225 226 227void 228TimingSimpleCPU::suspendContext(int thread_num) 229{ 230 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 231 232 assert(thread_num == 0); 233 assert(thread); 234 235 assert(_status == Running); 236 237 // just change status to Idle... if status != Running, 238 // completeInst() will not initiate fetch of next instruction. 
239 240 notIdleFraction--; 241 _status = Idle; 242} 243 244bool 245TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 246{ 247 RequestPtr req = pkt->req; 248 if (req->isMmapedIpr()) { 249 Tick delay; 250 delay = TheISA::handleIprRead(thread->getTC(), pkt); 251 new IprEvent(pkt, this, nextCycle(curTick + delay)); 252 _status = DcacheWaitResponse; 253 dcache_pkt = NULL; 254 } else if (!dcachePort.sendTiming(pkt)) { 255 _status = DcacheRetry; 256 dcache_pkt = pkt; 257 } else { 258 _status = DcacheWaitResponse; 259 // memory system takes ownership of packet 260 dcache_pkt = NULL; 261 } 262 return dcache_pkt == NULL; 263} 264 265Fault 266TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 267 RequestPtr &req, Addr split_addr, uint8_t *data, bool read) 268{ 269 Fault fault; 270 RequestPtr req1, req2; 271 assert(!req->isLocked() && !req->isSwap()); 272 req->splitOnVaddr(split_addr, req1, req2); 273 274 pkt1 = pkt2 = NULL; 275 if ((fault = buildPacket(pkt1, req1, read)) != NoFault || 276 (fault = buildPacket(pkt2, req2, read)) != NoFault) { 277 delete req;
| 1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Steve Reinhardt 29 */ 30 31#include "arch/locked_mem.hh" 32#include "arch/mmaped_ipr.hh" 33#include "arch/utility.hh" 34#include "base/bigint.hh" 35#include "cpu/exetrace.hh" 36#include "cpu/simple/timing.hh" 37#include "mem/packet.hh" 38#include "mem/packet_access.hh" 39#include "params/TimingSimpleCPU.hh" 40#include "sim/system.hh" 41 42using namespace std; 43using namespace TheISA; 44 45Port * 46TimingSimpleCPU::getPort(const std::string &if_name, int idx) 47{ 48 if (if_name == "dcache_port") 49 return &dcachePort; 50 else if (if_name == "icache_port") 51 return &icachePort; 52 else 53 panic("No Such Port\n"); 54} 55 56void 57TimingSimpleCPU::init() 58{ 59 BaseCPU::init(); 60#if FULL_SYSTEM 61 for (int i = 0; i < threadContexts.size(); ++i) { 62 ThreadContext *tc = threadContexts[i]; 63 64 // initialize CPU, including PC 65 TheISA::initCPU(tc, _cpuId); 66 } 67#endif 68} 69 70Tick 71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 72{ 73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!"); 74 return curTick; 75} 76 77void 78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 79{ 80 //No internal storage to update, jusst return 81 return; 82} 83 84void 85TimingSimpleCPU::CpuPort::recvStatusChange(Status status) 86{ 87 if (status == RangeChange) { 88 if (!snoopRangeSent) { 89 snoopRangeSent = true; 90 sendStatusChange(Port::RangeChange); 91 } 92 return; 93 } 94 95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!"); 96} 97 98 99void 100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 101{ 102 pkt = _pkt; 103 cpu->schedule(this, t); 104} 105 106TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock), fetchEvent(this) 108{ 109 _status = Idle; 110 111 icachePort.snoopRangeSent = false; 112 dcachePort.snoopRangeSent = false; 113 114 ifetch_pkt = dcache_pkt = NULL; 115 drainEvent = NULL; 116 previousTick = 0; 117 
changeState(SimObject::Running); 118} 119 120 121TimingSimpleCPU::~TimingSimpleCPU() 122{ 123} 124 125void 126TimingSimpleCPU::serialize(ostream &os) 127{ 128 SimObject::State so_state = SimObject::getState(); 129 SERIALIZE_ENUM(so_state); 130 BaseSimpleCPU::serialize(os); 131} 132 133void 134TimingSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 135{ 136 SimObject::State so_state; 137 UNSERIALIZE_ENUM(so_state); 138 BaseSimpleCPU::unserialize(cp, section); 139} 140 141unsigned int 142TimingSimpleCPU::drain(Event *drain_event) 143{ 144 // TimingSimpleCPU is ready to drain if it's not waiting for 145 // an access to complete. 146 if (_status == Idle || _status == Running || _status == SwitchedOut) { 147 changeState(SimObject::Drained); 148 return 0; 149 } else { 150 changeState(SimObject::Draining); 151 drainEvent = drain_event; 152 return 1; 153 } 154} 155 156void 157TimingSimpleCPU::resume() 158{ 159 DPRINTF(SimpleCPU, "Resume\n"); 160 if (_status != SwitchedOut && _status != Idle) { 161 assert(system->getMemoryMode() == Enums::timing); 162 163 if (fetchEvent.scheduled()) 164 deschedule(fetchEvent); 165 166 schedule(fetchEvent, nextCycle()); 167 } 168 169 changeState(SimObject::Running); 170} 171 172void 173TimingSimpleCPU::switchOut() 174{ 175 assert(_status == Running || _status == Idle); 176 _status = SwitchedOut; 177 numCycles += tickToCycles(curTick - previousTick); 178 179 // If we've been scheduled to resume but are then told to switch out, 180 // we'll need to cancel it. 181 if (fetchEvent.scheduled()) 182 deschedule(fetchEvent); 183} 184 185 186void 187TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 188{ 189 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 190 191 // if any of this CPU's ThreadContexts are active, mark the CPU as 192 // running and schedule its tick event. 
193 for (int i = 0; i < threadContexts.size(); ++i) { 194 ThreadContext *tc = threadContexts[i]; 195 if (tc->status() == ThreadContext::Active && _status != Running) { 196 _status = Running; 197 break; 198 } 199 } 200 201 if (_status != Running) { 202 _status = Idle; 203 } 204 assert(threadContexts.size() == 1); 205 previousTick = curTick; 206} 207 208 209void 210TimingSimpleCPU::activateContext(int thread_num, int delay) 211{ 212 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 213 214 assert(thread_num == 0); 215 assert(thread); 216 217 assert(_status == Idle); 218 219 notIdleFraction++; 220 _status = Running; 221 222 // kick things off by initiating the fetch of the next instruction 223 schedule(fetchEvent, nextCycle(curTick + ticks(delay))); 224} 225 226 227void 228TimingSimpleCPU::suspendContext(int thread_num) 229{ 230 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 231 232 assert(thread_num == 0); 233 assert(thread); 234 235 assert(_status == Running); 236 237 // just change status to Idle... if status != Running, 238 // completeInst() will not initiate fetch of next instruction. 
239 240 notIdleFraction--; 241 _status = Idle; 242} 243 244bool 245TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 246{ 247 RequestPtr req = pkt->req; 248 if (req->isMmapedIpr()) { 249 Tick delay; 250 delay = TheISA::handleIprRead(thread->getTC(), pkt); 251 new IprEvent(pkt, this, nextCycle(curTick + delay)); 252 _status = DcacheWaitResponse; 253 dcache_pkt = NULL; 254 } else if (!dcachePort.sendTiming(pkt)) { 255 _status = DcacheRetry; 256 dcache_pkt = pkt; 257 } else { 258 _status = DcacheWaitResponse; 259 // memory system takes ownership of packet 260 dcache_pkt = NULL; 261 } 262 return dcache_pkt == NULL; 263} 264 265Fault 266TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 267 RequestPtr &req, Addr split_addr, uint8_t *data, bool read) 268{ 269 Fault fault; 270 RequestPtr req1, req2; 271 assert(!req->isLocked() && !req->isSwap()); 272 req->splitOnVaddr(split_addr, req1, req2); 273 274 pkt1 = pkt2 = NULL; 275 if ((fault = buildPacket(pkt1, req1, read)) != NoFault || 276 (fault = buildPacket(pkt2, req2, read)) != NoFault) { 277 delete req;
|
| 278 delete req1;
|
278 delete pkt1; 279 req = NULL; 280 pkt1 = NULL; 281 return fault; 282 } 283 284 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr()); 285 286 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags()); 287 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(), 288 Packet::Broadcast);
| 279 delete pkt1; 280 req = NULL; 281 pkt1 = NULL; 282 return fault; 283 } 284 285 assert(!req1->isMmapedIpr() && !req2->isMmapedIpr()); 286 287 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags()); 288 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand(), 289 Packet::Broadcast);
|
| 290 if (req->getFlags().isSet(Request::NO_ACCESS)) { 291 delete req1; 292 delete pkt1; 293 delete req2; 294 delete pkt2; 295 pkt1 = pkt; 296 pkt2 = NULL; 297 return NoFault; 298 }
|
289 290 pkt->dataDynamic<uint8_t>(data); 291 pkt1->dataStatic<uint8_t>(data); 292 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 293 294 SplitMainSenderState * main_send_state = new SplitMainSenderState; 295 pkt->senderState = main_send_state; 296 main_send_state->fragments[0] = pkt1; 297 main_send_state->fragments[1] = pkt2; 298 main_send_state->outstanding = 2; 299 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 300 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 301 return fault; 302} 303 304Fault 305TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr &req, bool read) 306{
| 299 300 pkt->dataDynamic<uint8_t>(data); 301 pkt1->dataStatic<uint8_t>(data); 302 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 303 304 SplitMainSenderState * main_send_state = new SplitMainSenderState; 305 pkt->senderState = main_send_state; 306 main_send_state->fragments[0] = pkt1; 307 main_send_state->fragments[1] = pkt2; 308 main_send_state->outstanding = 2; 309 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 310 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 311 return fault; 312} 313 314Fault 315TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr &req, bool read) 316{
|
307 Fault fault = read ? thread->translateDataReadReq(req) : 308 thread->translateDataWriteReq(req);
| 317 Fault fault = thread->dtb->translate(req, tc, !read);
|
309 MemCmd cmd; 310 if (fault != NoFault) { 311 delete req; 312 req = NULL; 313 pkt = NULL; 314 return fault; 315 } else if (read) { 316 cmd = MemCmd::ReadReq; 317 if (req->isLocked()) 318 cmd = MemCmd::LoadLockedReq; 319 } else { 320 cmd = MemCmd::WriteReq; 321 if (req->isLocked()) { 322 cmd = MemCmd::StoreCondReq; 323 } else if (req->isSwap()) { 324 cmd = MemCmd::SwapReq; 325 } 326 } 327 pkt = new Packet(req, cmd, Packet::Broadcast); 328 return NoFault; 329} 330 331template <class T> 332Fault 333TimingSimpleCPU::read(Addr addr, T &data, unsigned flags) 334{ 335 Fault fault; 336 const int asid = 0; 337 const int thread_id = 0; 338 const Addr pc = thread->readPC(); 339 int block_size = dcachePort.peerBlockSize(); 340 int data_size = sizeof(T); 341 342 PacketPtr pkt; 343 RequestPtr req = new Request(asid, addr, data_size, 344 flags, pc, _cpuId, thread_id); 345 346 Addr split_addr = roundDown(addr + data_size - 1, block_size); 347 assert(split_addr <= addr || split_addr - addr < block_size); 348 349 if (split_addr > addr) { 350 PacketPtr pkt1, pkt2;
| 318 MemCmd cmd; 319 if (fault != NoFault) { 320 delete req; 321 req = NULL; 322 pkt = NULL; 323 return fault; 324 } else if (read) { 325 cmd = MemCmd::ReadReq; 326 if (req->isLocked()) 327 cmd = MemCmd::LoadLockedReq; 328 } else { 329 cmd = MemCmd::WriteReq; 330 if (req->isLocked()) { 331 cmd = MemCmd::StoreCondReq; 332 } else if (req->isSwap()) { 333 cmd = MemCmd::SwapReq; 334 } 335 } 336 pkt = new Packet(req, cmd, Packet::Broadcast); 337 return NoFault; 338} 339 340template <class T> 341Fault 342TimingSimpleCPU::read(Addr addr, T &data, unsigned flags) 343{ 344 Fault fault; 345 const int asid = 0; 346 const int thread_id = 0; 347 const Addr pc = thread->readPC(); 348 int block_size = dcachePort.peerBlockSize(); 349 int data_size = sizeof(T); 350 351 PacketPtr pkt; 352 RequestPtr req = new Request(asid, addr, data_size, 353 flags, pc, _cpuId, thread_id); 354 355 Addr split_addr = roundDown(addr + data_size - 1, block_size); 356 assert(split_addr <= addr || split_addr - addr < block_size); 357 358 if (split_addr > addr) { 359 PacketPtr pkt1, pkt2;
|
351 this->buildSplitPacket(pkt1, pkt2, req,
| 360 Fault fault = this->buildSplitPacket(pkt1, pkt2, req,
|
352 split_addr, (uint8_t *)(new T), true);
| 361 split_addr, (uint8_t *)(new T), true);
|
353 if (handleReadPacket(pkt1)) {
| 362 if (fault != NoFault) 363 return fault; 364 if (req->getFlags().isSet(Request::NO_ACCESS)) { 365 dcache_pkt = pkt1; 366 } else if (handleReadPacket(pkt1)) {
|
354 SplitFragmentSenderState * send_state = 355 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 356 send_state->clearFromParent(); 357 if (handleReadPacket(pkt2)) { 358 send_state = 359 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 360 send_state->clearFromParent(); 361 } 362 } 363 } else { 364 Fault fault = buildPacket(pkt, req, true); 365 if (fault != NoFault) { 366 return fault; 367 }
| 367 SplitFragmentSenderState * send_state = 368 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 369 send_state->clearFromParent(); 370 if (handleReadPacket(pkt2)) { 371 send_state = 372 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 373 send_state->clearFromParent(); 374 } 375 } 376 } else { 377 Fault fault = buildPacket(pkt, req, true); 378 if (fault != NoFault) { 379 return fault; 380 }
|
368 pkt->dataDynamic<T>(new T); 369 370 handleReadPacket(pkt);
| 381 if (req->getFlags().isSet(Request::NO_ACCESS)) { 382 dcache_pkt = pkt; 383 } else { 384 pkt->dataDynamic<T>(new T); 385 handleReadPacket(pkt); 386 }
|
371 } 372 373 if (traceData) { 374 traceData->setData(data); 375 traceData->setAddr(addr); 376 } 377 378 // This will need a new way to tell if it has a dcache attached. 379 if (req->isUncacheable()) 380 recordEvent("Uncached Read"); 381 382 return NoFault; 383} 384
| 387 } 388 389 if (traceData) { 390 traceData->setData(data); 391 traceData->setAddr(addr); 392 } 393 394 // This will need a new way to tell if it has a dcache attached. 395 if (req->isUncacheable()) 396 recordEvent("Uncached Read"); 397 398 return NoFault; 399} 400
|
385Fault 386TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr, 387 int size, unsigned flags) 388{ 389 Request *req = 390 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0); 391 392 if (traceData) { 393 traceData->setAddr(vaddr); 394 } 395 396 Fault fault = thread->translateDataWriteReq(req); 397 398 if (fault == NoFault) 399 paddr = req->getPaddr(); 400 401 delete req; 402 return fault; 403} 404
| |
405#ifndef DOXYGEN_SHOULD_SKIP_THIS 406 407template 408Fault 409TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 410 411template 412Fault 413TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 414 415template 416Fault 417TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 418 419template 420Fault 421TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 422 423template 424Fault 425TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 426 427template 428Fault 429TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 430 431#endif //DOXYGEN_SHOULD_SKIP_THIS 432 433template<> 434Fault 435TimingSimpleCPU::read(Addr addr, double &data, unsigned flags) 436{ 437 return read(addr, *(uint64_t*)&data, flags); 438} 439 440template<> 441Fault 442TimingSimpleCPU::read(Addr addr, float &data, unsigned flags) 443{ 444 return read(addr, *(uint32_t*)&data, flags); 445} 446 447 448template<> 449Fault 450TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 451{ 452 return read(addr, (uint32_t&)data, flags); 453} 454 455bool 456TimingSimpleCPU::handleWritePacket() 457{ 458 RequestPtr req = dcache_pkt->req; 459 if (req->isMmapedIpr()) { 460 Tick delay; 461 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 462 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay)); 463 _status = DcacheWaitResponse; 464 dcache_pkt = NULL; 465 } else if (!dcachePort.sendTiming(dcache_pkt)) { 466 _status = DcacheRetry; 467 } else { 468 _status = DcacheWaitResponse; 469 // memory system takes ownership of packet 470 dcache_pkt = NULL; 471 } 472 return dcache_pkt == NULL; 473} 474 475template <class T> 476Fault 477TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 478{ 479 const int asid = 0; 480 const int thread_id = 0; 481 const Addr pc = thread->readPC(); 482 int block_size = dcachePort.peerBlockSize(); 483 int data_size = sizeof(T); 484 485 RequestPtr req = new Request(asid, addr, 
data_size, 486 flags, pc, _cpuId, thread_id); 487 488 Addr split_addr = roundDown(addr + data_size - 1, block_size); 489 assert(split_addr <= addr || split_addr - addr < block_size); 490 491 if (split_addr > addr) { 492 PacketPtr pkt1, pkt2; 493 T *dataP = new T; 494 *dataP = data; 495 Fault fault = this->buildSplitPacket(pkt1, pkt2, req, split_addr, 496 (uint8_t *)dataP, false); 497 if (fault != NoFault) 498 return fault; 499 dcache_pkt = pkt1;
| 401#ifndef DOXYGEN_SHOULD_SKIP_THIS 402 403template 404Fault 405TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 406 407template 408Fault 409TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 410 411template 412Fault 413TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 414 415template 416Fault 417TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 418 419template 420Fault 421TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 422 423template 424Fault 425TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 426 427#endif //DOXYGEN_SHOULD_SKIP_THIS 428 429template<> 430Fault 431TimingSimpleCPU::read(Addr addr, double &data, unsigned flags) 432{ 433 return read(addr, *(uint64_t*)&data, flags); 434} 435 436template<> 437Fault 438TimingSimpleCPU::read(Addr addr, float &data, unsigned flags) 439{ 440 return read(addr, *(uint32_t*)&data, flags); 441} 442 443 444template<> 445Fault 446TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 447{ 448 return read(addr, (uint32_t&)data, flags); 449} 450 451bool 452TimingSimpleCPU::handleWritePacket() 453{ 454 RequestPtr req = dcache_pkt->req; 455 if (req->isMmapedIpr()) { 456 Tick delay; 457 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 458 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay)); 459 _status = DcacheWaitResponse; 460 dcache_pkt = NULL; 461 } else if (!dcachePort.sendTiming(dcache_pkt)) { 462 _status = DcacheRetry; 463 } else { 464 _status = DcacheWaitResponse; 465 // memory system takes ownership of packet 466 dcache_pkt = NULL; 467 } 468 return dcache_pkt == NULL; 469} 470 471template <class T> 472Fault 473TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 474{ 475 const int asid = 0; 476 const int thread_id = 0; 477 const Addr pc = thread->readPC(); 478 int block_size = dcachePort.peerBlockSize(); 479 int data_size = sizeof(T); 480 481 RequestPtr req = new Request(asid, 
addr, data_size, 482 flags, pc, _cpuId, thread_id); 483 484 Addr split_addr = roundDown(addr + data_size - 1, block_size); 485 assert(split_addr <= addr || split_addr - addr < block_size); 486 487 if (split_addr > addr) { 488 PacketPtr pkt1, pkt2; 489 T *dataP = new T; 490 *dataP = data; 491 Fault fault = this->buildSplitPacket(pkt1, pkt2, req, split_addr, 492 (uint8_t *)dataP, false); 493 if (fault != NoFault) 494 return fault; 495 dcache_pkt = pkt1;
|
500 if (handleWritePacket()) { 501 SplitFragmentSenderState * send_state = 502 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 503 send_state->clearFromParent(); 504 dcache_pkt = pkt2; 505 if (handleReadPacket(pkt2)) { 506 send_state = 507 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
| 496 if (!req->getFlags().isSet(Request::NO_ACCESS)) { 497 if (handleWritePacket()) { 498 SplitFragmentSenderState * send_state = 499 dynamic_cast<SplitFragmentSenderState *>( 500 pkt1->senderState);
|
508 send_state->clearFromParent();
| 501 send_state->clearFromParent();
|
| 502 dcache_pkt = pkt2; 503 if (handleReadPacket(pkt2)) { 504 send_state = 505 dynamic_cast<SplitFragmentSenderState *>( 506 pkt1->senderState); 507 send_state->clearFromParent(); 508 }
|
509 } 510 } 511 } else { 512 bool do_access = true; // flag to suppress cache access 513 514 Fault fault = buildPacket(dcache_pkt, req, false); 515 if (fault != NoFault) 516 return fault; 517
| 509 } 510 } 511 } else { 512 bool do_access = true; // flag to suppress cache access 513 514 Fault fault = buildPacket(dcache_pkt, req, false); 515 if (fault != NoFault) 516 return fault; 517
|
518 if (req->isLocked()) { 519 do_access = TheISA::handleLockedWrite(thread, req); 520 } else if (req->isCondSwap()) { 521 assert(res); 522 req->setExtraData(*res); 523 }
| 518 if (!req->getFlags().isSet(Request::NO_ACCESS)) { 519 if (req->isLocked()) { 520 do_access = TheISA::handleLockedWrite(thread, req); 521 } else if (req->isCondSwap()) { 522 assert(res); 523 req->setExtraData(*res); 524 }
|
524
| 525
|
525 dcache_pkt->allocate(); 526 if (req->isMmapedIpr()) 527 dcache_pkt->set(htog(data)); 528 else 529 dcache_pkt->set(data);
| 526 dcache_pkt->allocate(); 527 if (req->isMmapedIpr()) 528 dcache_pkt->set(htog(data)); 529 else 530 dcache_pkt->set(data);
|
530
| 531
|
531 if (do_access) 532 handleWritePacket();
| 532 if (do_access) 533 handleWritePacket(); 534 }
|
533 } 534 535 if (traceData) { 536 traceData->setAddr(req->getVaddr()); 537 traceData->setData(data); 538 } 539 540 // This will need a new way to tell if it's hooked up to a cache or not. 541 if (req->isUncacheable()) 542 recordEvent("Uncached Write"); 543 544 // If the write needs to have a fault on the access, consider calling 545 // changeStatus() and changing it to "bad addr write" or something. 546 return NoFault; 547} 548
| 535 } 536 537 if (traceData) { 538 traceData->setAddr(req->getVaddr()); 539 traceData->setData(data); 540 } 541 542 // This will need a new way to tell if it's hooked up to a cache or not. 543 if (req->isUncacheable()) 544 recordEvent("Uncached Write"); 545 546 // If the write needs to have a fault on the access, consider calling 547 // changeStatus() and changing it to "bad addr write" or something. 548 return NoFault; 549} 550
|
549Fault 550TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr, 551 int size, unsigned flags) 552{ 553 Request *req = 554 new Request(0, vaddr, size, flags, thread->readPC(), _cpuId, 0);
| |
555
| 551
|
556 if (traceData) { 557 traceData->setAddr(vaddr); 558 } 559 560 Fault fault = thread->translateDataWriteReq(req); 561 562 if (fault == NoFault) 563 paddr = req->getPaddr(); 564 565 delete req; 566 return fault; 567} 568 569
| |
// Explicit instantiations of the templated TimingSimpleCPU::write() for
// every plain-old-data width the ISAs use (Twin32_t/Twin64_t plus the
// uint8..uint64 family).  Hidden from Doxygen to keep the API docs clean.
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

// Floating-point writes forward to the unsigned-integer overload of the
// same width by reinterpreting the value's bit pattern.
// NOTE(review): the pointer cast is a strict-aliasing violation by the
// letter of the standard; memcpy would be the portable spelling — confirm
// against the project's prevailing convention before changing.
template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


// Signed 32-bit write: value-converts to uint32_t and forwards.
template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


// Begin fetching the next instruction.  For normal (non-ROM) fetches this
// builds a Request/Packet pair and sends it to the icache in timing mode;
// for microcode fetched from the ROM it skips the memory system entirely
// and completes immediately with a NULL packet.  Also checks for pending
// interrupts and PC events before starting the fetch.
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Don't take interrupts in the middle of a macro-op that has delayed
    // commit semantics.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
        Fault fault = setupFetchRequest(ifetch_req);

        ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
        // Fetched bits land directly in the CPU's 'inst' buffer.
        ifetch_pkt->dataStatic(&inst);

        if (fault == NoFault) {
            if (!icachePort.sendTiming(ifetch_pkt)) {
                // Need to wait for retry
                _status = IcacheRetry;
            } else {
                // Need to wait for cache to respond
                _status = IcacheWaitResponse;
                // ownership of packet transferred to memory system
                ifetch_pkt = NULL;
            }
        } else {
            delete ifetch_req;
            delete ifetch_pkt;
            // fetch fault: advance directly to next instruction (fault handler)
            advanceInst(fault);
        }
    } else {
        // ROM microcode: no memory access needed, complete immediately.
        _status = IcacheWaitResponse;
        completeIfetch(NULL);
    }

    // Account for the cycles elapsed since the last accounting point.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


// Advance the architectural PC past the just-completed instruction (or
// into the fault handler) and, if the CPU is still Running, kick off the
// fetch of the next instruction.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    if (fault != NoFault || !stayAtPC)
        advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


// Icache response callback (pkt may be NULL for ROM microcode fetches):
// decode and execute the fetched instruction.  Memory-reference
// instructions initiate a dcache access and complete later in
// completeDataAccess(); everything else executes to completion here.
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    // If a drain was requested, stop here instead of executing.
    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst &&
            curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL)
                {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // preExecute() produced no instruction (e.g. still assembling a
        // macro-op): just move on.
        advanceInst(NoFault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

// Deferred icache response: deliver the packet to the CPU at the next
// CPU clock edge.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

// Timing-mode receive on the icache port.  Real responses are aligned to
// the next CPU clock edge; nacked packets are retransmitted (falling back
// to retry state if the resend is refused); anything else is assumed to be
// snoop traffic and ignored.
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

// The icache is ready for our previously-refused fetch packet: resend it.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

// Dcache response callback: finish the outstanding load/store.  For split
// (unaligned) accesses, each fragment decrements the outstanding count on
// the parent packet and completion is deferred until the last fragment has
// returned, at which point the big packet is completed instead.
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (pkt->senderState) {
        // This response is one fragment of a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting on sibling fragments.
            return;
        } else {
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    assert(_status == DcacheWaitResponse);
    _status = Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


// Finish a drain request: transition to Drained and notify the waiter.
void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

// Hook port connection so the thread context's functional/virtual ports
// are rewired whenever the dcache port's peer changes (full system only).
void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

// Timing-mode receive on the dcache port; same structure as the icache
// version: align real responses to the next CPU clock edge, retransmit
// nacks, ignore snoops.
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick) {
            cpu->completeDataAccess(pkt);
        } else {
            tickEvent.schedule(pkt, next_tick);
        }

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

// Deferred dcache response: deliver the packet to the CPU at the next
// CPU clock edge.
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

// The dcache is ready for our previously-refused packet.  For a split
// access, sending one fragment successfully moves on to the next pending
// fragment; otherwise a successful send hands ownership to the memory
// system and we wait for the response.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTiming(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

// Event used to model the latency of a memory-mapped IPR access: fires at
// time t and completes the access as if it were a dcache response.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


// Debugging aid: print the memory hierarchy's view of an address.
void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
// Param factory: this CPU model is single-threaded, so force numThreads
// to 1 and (in syscall-emulation mode) require exactly one workload.
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}
// NOTE(review): this region previously contained a verbatim duplicate of
// the definitions earlier in this translation unit — the explicit
// TimingSimpleCPU::write<>() instantiations and specializations, fetch(),
// advanceInst(), completeIfetch(), the IcachePort/DcachePort tick-event
// and recvTiming/recvRetry handlers, completeDataAccess(), completeDrain(),
// DcachePort::setPeer(), the IprEvent members, printAddr(), and
// TimingSimpleCPUParams::create().  Redefining every one of those symbols
// violates the one-definition rule (each function defined twice in the same
// translation unit) and cannot link or even compile.  The duplicate block
// has been removed; the surviving definitions above are unchanged.