1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 28 unchanged lines hidden (view full) --- 37#include "cpu/exetrace.hh" 38#include "debug/ExecFaulting.hh" 39#include "debug/SimpleCPU.hh" 40#include "mem/packet.hh" 41#include "mem/packet_access.hh" 42#include "params/AtomicSimpleCPU.hh" 43#include "sim/faults.hh" 44#include "sim/system.hh" |
45#include "sim/full_system.hh" |
46 47using namespace std; 48using namespace TheISA; 49 50AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 51 : Event(CPU_Tick_Pri), cpu(c) 52{ 53} --- 25 unchanged lines hidden (view full) --- 79 else 80 panic("No Such Port\n"); 81} 82 83void 84AtomicSimpleCPU::init() 85{ 86 BaseCPU::init(); |
    // Full-system initialization: set up each thread context's
    // architectural state (including the boot PC).
    if (FullSystem) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            // NOTE(review): double-guarded during the FS/SE unification --
            // the runtime FullSystem flag above plus this compile-time
            // guard. Presumably TheISA::initCPU is only compiled in
            // FULL_SYSTEM builds, so the #if cannot be dropped yet -- confirm
            // before cleaning this up.
#if FULL_SYSTEM
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
#endif
        }
    }
    // Cache the address range served by the directly connected physical
    // memory so accesses can bypass the bus (see hasPhysMemPort users).
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        // NOTE(review): assumes the peer reports at least one range;
        // *begin() on an empty list is undefined -- verify the peer's
        // contract, and note any ranges past the first are ignored.
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
// An atomic-mode CPU never issues timing-mode requests, so a timing
// response arriving here indicates a memory-system protocol violation.
bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;  // unreachable; satisfies the return type after panic
}

// Atomic snoop from a peer: this CPU keeps no internal coherence state,
// so there is nothing to update and the snoop costs zero ticks.
Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

// Functional snoop: likewise no internal storage to keep coherent.
void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

// The only status change this port expects is the peer announcing its
// address ranges; echo a RangeChange back exactly once (tracked by
// snoopRangeSent) so the peer learns about this side too.
void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

// Retries only follow a rejected timing request, which this CPU never sends.
void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

// When the dcache port is (re)wired, refresh the thread context's
// functional/virtual memory ports so they reach memory through the new peer.
void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

    if (FullSystem) {
        // Update the ThreadContext's memory ports (Functional/Virtual
        // Ports)
        cpu->tcBase()->connectMemPorts(cpu->tcBase());
    }
}
162AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 163 : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 164 simulate_data_stalls(p->simulate_data_stalls), 165 simulate_inst_stalls(p->simulate_inst_stalls), 166 icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 167 physmemPort(name() + "-iport", this), hasPhysMemPort(false) 168{ 169 _status = Idle; |
170 171 icachePort.snoopRangeSent = false; 172 dcachePort.snoopRangeSent = false; 173 |
174} 175 176 177AtomicSimpleCPU::~AtomicSimpleCPU() 178{ 179 if (tickEvent.scheduled()) { 180 deschedule(tickEvent); 181 } --- 45 unchanged lines hidden (view full) --- 227 228 tickEvent.squash(); 229} 230 231 232void 233AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 234{ |
    // Hand over base-CPU state (thread contexts etc.) from the old CPU and
    // splice this CPU's cache ports into the old CPU's connections.
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);
236 237 assert(!tickEvent.scheduled()); 238 239 // if any of this CPU's ThreadContexts are active, mark the CPU as 240 // running and schedule its tick event. 241 ThreadID size = threadContexts.size(); 242 for (ThreadID i = 0; i < size; ++i) { 243 ThreadContext *tc = threadContexts[i]; --- 9 unchanged lines hidden (view full) --- 253 assert(threadContexts.size() == 1); 254 ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 255 data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 256 data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 257} 258 259 260void |
// Wake the (single) thread context and schedule the CPU's tick event
// after 'delay' cycles.  Only callable when the CPU is Idle.
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // This CPU models exactly one hardware thread.
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    // Account the cycles spent suspended so numCycles stays consistent
    // with elapsed simulated time across suspend/activate pairs.
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
    _status = Running;
}


void
281AtomicSimpleCPU::suspendContext(int thread_num) |
282{ 283 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 284 285 assert(thread_num == 0); 286 assert(thread); 287 288 if (_status == Idle) 289 return; --- 324 unchanged lines hidden (view full) --- 614// 615// AtomicSimpleCPU Simulation Object 616// 617AtomicSimpleCPU * 618AtomicSimpleCPUParams::create() 619{ 620 numThreads = 1; 621#if !FULL_SYSTEM |
    // NOTE(review): transitional FS/SE double guard -- inside this
    // '#if !FULL_SYSTEM' region the runtime '!FullSystem' test is
    // presumably always true, but the #if cannot be dropped yet because
    // the 'workload' parameter may only exist in SE builds -- confirm
    // before simplifying.  Atomic CPU supports exactly one workload.
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}