/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}

void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}

void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}

void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}
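
// Snoop handlers on the CPU's data port: the atomic CPU observes other
// masters' requests so that an invalidation releases any LL/SC lock held
// on the snooped cache block (cacheBlockMask selects the block-aligned
// address bits).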

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}
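
// Read size bytes at virtual address addr into data, splitting the access
// in two when it crosses a cache-line boundary.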
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
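                // Check the store-conditional against any lock held on
                // this block; if the check fails, the memory access below
                // is suppressed via do_access.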
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                (!curStaticInst->isMicroop() ||
                 curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excessive inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                 ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                 ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}