atomic.cc (4182:5b2c0d266107) | atomic.cc (4192:7accc6365bb9) |
---|---|
1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 112 unchanged lines hidden (view full) --- 121} 122 123void 124AtomicSimpleCPU::CpuPort::recvRetry() 125{ 126 panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 127} 128 | 1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 112 unchanged lines hidden (view full) --- 121} 122 123void 124AtomicSimpleCPU::CpuPort::recvRetry() 125{ 126 panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 127} 128 |
 | 129void 130AtomicSimpleCPU::DcachePort::setPeer(Port *port) 131{ 132 Port::setPeer(port); |
129 | 133 |
 | 134#if FULL_SYSTEM 135 // Update the ThreadContext's memory ports (Functional/Virtual 136 // Ports) 137 cpu->tcBase()->connectMemPorts(); 138#endif 139} 140 |
130AtomicSimpleCPU::AtomicSimpleCPU(Params *p) 131 : BaseSimpleCPU(p), tickEvent(this), 132 width(p->width), simulate_stalls(p->simulate_stalls), 133 icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this) 134{ 135 _status = Idle; 136 137 icachePort.snoopRangeSent = false; --- 68 unchanged lines hidden (view full) --- 206 207 tickEvent.squash(); 208 209 210 211void 212AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 213{ | 141AtomicSimpleCPU::AtomicSimpleCPU(Params *p) 142 : BaseSimpleCPU(p), tickEvent(this), 143 width(p->width), simulate_stalls(p->simulate_stalls), 144 icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this) 145{ 146 _status = Idle; 147 148 icachePort.snoopRangeSent = false; --- 68 unchanged lines hidden (view full) --- 217 218 tickEvent.squash(); 219 220 221 222void 223AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 224{ |
214 BaseCPU::takeOverFrom(oldCPU); | 225 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); |
215 216 assert(!tickEvent.scheduled()); 217 218 // if any of this CPU's ThreadContexts are active, mark the CPU as 219 // running and schedule its tick event. 220 for (int i = 0; i < threadContexts.size(); ++i) { 221 ThreadContext *tc = threadContexts[i]; 222 if (tc->status() == ThreadContext::Active && _status != Running) { --- 14 unchanged lines hidden (view full) --- 237 assert(thread_num == 0); 238 assert(thread); 239 240 assert(_status == Idle); 241 assert(!tickEvent.scheduled()); 242 243 notIdleFraction++; 244 | 226 227 assert(!tickEvent.scheduled()); 228 229 // if any of this CPU's ThreadContexts are active, mark the CPU as 230 // running and schedule its tick event. 231 for (int i = 0; i < threadContexts.size(); ++i) { 232 ThreadContext *tc = threadContexts[i]; 233 if (tc->status() == ThreadContext::Active && _status != Running) { --- 14 unchanged lines hidden (view full) --- 248 assert(thread_num == 0); 249 assert(thread); 250 251 assert(_status == Idle); 252 assert(!tickEvent.scheduled()); 253 254 notIdleFraction++; 255 |
245#if FULL_SYSTEM 246 // Connect the ThreadContext's memory ports (Functional/Virtual 247 // Ports) 248 tc->connectMemPorts(); 249#endif 250 | |
251 //Make sure ticks are still on multiples of cycles 252 tickEvent.schedule(nextCycle(curTick + cycles(delay))); 253 _status = Running; 254} 255 256 257void 258AtomicSimpleCPU::suspendContext(int thread_num) --- 236 unchanged lines hidden (view full) --- 495 numCycles++; 496 497 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 498 checkForInterrupts(); 499 500 Fault fault = setupFetchRequest(ifetch_req); 501 502 if (fault == NoFault) { | 256 //Make sure ticks are still on multiples of cycles 257 tickEvent.schedule(nextCycle(curTick + cycles(delay))); 258 _status = Running; 259} 260 261 262void 263AtomicSimpleCPU::suspendContext(int thread_num) --- 236 unchanged lines hidden (view full) --- 500 numCycles++; 501 502 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 503 checkForInterrupts(); 504 505 Fault fault = setupFetchRequest(ifetch_req); 506 507 if (fault == NoFault) { |
503 Tick icache_latency = 0; 504 bool icache_access = false; 505 dcache_access = false; // assume no dcache access | 508 ifetch_pkt->reinitFromRequest(); |
506 | 509 |
507 //Fetch more instruction memory if necessary 508 if(predecoder.needMoreBytes()) 509 { 510 icache_access = true; 511 ifetch_pkt->reinitFromRequest(); | 510 Tick icache_latency = icachePort.sendAtomic(ifetch_pkt); 511 // ifetch_req is initialized to read the instruction directly 512 // into the CPU object's inst field. |
512 | 513 |
513 icache_latency = icachePort.sendAtomic(ifetch_pkt); 514 // ifetch_req is initialized to read the instruction directly 515 // into the CPU object's inst field. 516 } 517 | 514 dcache_access = false; // assume no dcache access |
518 preExecute(); 519 | 515 preExecute(); 516 |
520 if(curStaticInst) 521 { 522 fault = curStaticInst->execute(this, traceData); 523 postExecute(); 524 } | 517 fault = curStaticInst->execute(this, traceData); 518 postExecute(); |
525 526 // @todo remove me after debugging with legion done 527 if (curStaticInst && (!curStaticInst->isMicroOp() || 528 curStaticInst->isFirstMicroOp())) 529 instCnt++; 530 531 if (simulate_stalls) { | 519 520 // @todo remove me after debugging with legion done 521 if (curStaticInst && (!curStaticInst->isMicroOp() || 522 curStaticInst->isFirstMicroOp())) 523 instCnt++; 524 525 if (simulate_stalls) { |
532 Tick icache_stall = 533 icache_access ? icache_latency - cycles(1) : 0; | 526 Tick icache_stall = icache_latency - cycles(1); |
534 Tick dcache_stall = 535 dcache_access ? dcache_latency - cycles(1) : 0; 536 Tick stall_cycles = (icache_stall + dcache_stall) / cycles(1); 537 if (cycles(stall_cycles) < (icache_stall + dcache_stall)) 538 latency += cycles(stall_cycles+1); 539 else 540 latency += cycles(stall_cycles); 541 } 542 543 } | 527 Tick dcache_stall = 528 dcache_access ? dcache_latency - cycles(1) : 0; 529 Tick stall_cycles = (icache_stall + dcache_stall) / cycles(1); 530 if (cycles(stall_cycles) < (icache_stall + dcache_stall)) 531 latency += cycles(stall_cycles+1); 532 else 533 latency += cycles(stall_cycles); 534 } 535 536 } |
544 if(predecoder.needMoreBytes()) 545 advancePC(fault); | 537 538 advancePC(fault); |
546 } 547 548 if (_status != Idle) 549 tickEvent.schedule(curTick + latency); 550} 551 552 553//////////////////////////////////////////////////////////////////////// --- 109 unchanged lines hidden --- | 539 } 540 541 if (_status != Idle) 542 tickEvent.schedule(curTick + latency); 543} 544 545 546//////////////////////////////////////////////////////////////////////// --- 109 unchanged lines hidden --- |
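
The change on the 4192 side adds a setPeer override to the data-cache port: the FULL_SYSTEM call to connectMemPorts() that the 4182 side makes in activateContext (old lines 245-250, deleted above) now runs whenever the data port's peer is set, so the ThreadContext's Functional/Virtual memory ports are updated on (re)connection rather than only when a context is activated. Unwrapped from the right-hand column purely for readability, the added hunk is:

```cpp
// New lines 129-140 on the 4192 side, reflowed here; same code as the right-hand column.
void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}
```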
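
In the same spirit, takeOverFrom now hands its two cache ports to the base class (old line 214 versus new line 225), presumably so BaseCPU can rebind the old CPU's port peers to this CPU; the BaseCPU side of that overload is not part of this diff. The call-site change is just:

```cpp
// AtomicSimpleCPU::takeOverFrom, as shown in the diff:
BaseCPU::takeOverFrom(oldCPU);                              // 4182 side
BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);    // 4192 side
```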
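
The largest hunk is in tick(). On the 4182 side the instruction fetch is issued only when predecoder.needMoreBytes() asks for more bytes (tracked in icache_access), execute() is guarded by a curStaticInst check, and advancePC() is likewise gated; on the 4192 side the fetch packet is sent unconditionally and its latency is always charged. Both sides share the stall accounting under simulate_stalls, which rounds the combined icache and dcache stall up to a whole number of CPU cycles. Below is a minimal, self-contained sketch of that rounding; the clock period and latencies are made-up values, cycles() stands in for the CPU's cycles-to-ticks helper, and, like the original, it assumes each access takes at least one cycle.

```cpp
// Standalone illustration of the stall rounding in AtomicSimpleCPU::tick().
// Not gem5 code: Tick, cycles() and all numbers below are stand-ins.
#include <cstdint>
#include <iostream>

using Tick = std::uint64_t;

const Tick clock_period = 500;                      // ticks per CPU cycle (assumed)
Tick cycles(Tick n) { return n * clock_period; }    // cycles-to-ticks helper

int main()
{
    Tick latency = cycles(1);                       // base cost of the instruction
    Tick icache_latency = 1200;                     // example atomic access latencies
    Tick dcache_latency = 900;
    bool dcache_access = true;

    // Charge everything beyond the first cycle of each access...
    Tick icache_stall = icache_latency - cycles(1);
    Tick dcache_stall = dcache_access ? dcache_latency - cycles(1) : 0;

    // ...then round the combined stall up to whole cycles, as the diff does.
    Tick stall_cycles = (icache_stall + dcache_stall) / cycles(1);
    if (cycles(stall_cycles) < (icache_stall + dcache_stall))
        latency += cycles(stall_cycles + 1);        // leftover ticks cost a full cycle
    else
        latency += cycles(stall_cycles);

    std::cout << "latency = " << latency << " ticks\n";   // 2000 ticks = 4 cycles here
    return 0;
}
```

With these numbers the two accesses leave 1100 ticks of stall beyond their first cycles, which rounds up to three extra cycles, so the instruction is charged four cycles (2000 ticks) before the next tick event is scheduled.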