3c3
< * Copyright (c) 2012-2013 ARM Limited
---
> * Copyright (c) 2012-2013,2015 ARM Limited
87c87
< BaseCPU::init();
---
> BaseSimpleCPU::init();
89,104c89,91
< // Initialise the ThreadContext's memory proxies
< tcBase()->initMemProxies(tcBase());
<
< if (FullSystem && !params()->switched_out) {
< ThreadID size = threadContexts.size();
< for (ThreadID i = 0; i < size; ++i) {
< ThreadContext *tc = threadContexts[i];
< // initialize CPU, including PC
< TheISA::initCPU(tc, tc->contextId());
< }
< }
<
< // Atomic doesn't do MT right now, so contextId == threadId
< ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
< data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
< data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
---
> ifetch_req.setThreadContext(_cpuId, 0);
> data_read_req.setThreadContext(_cpuId, 0);
> data_write_req.setThreadContext(_cpuId, 0);
134c121
< DPRINTF(Drain, "Requesting drain: %s\n", pcState());
---
> DPRINTF(Drain, "Requesting drain.\n");
139a127
> activeThreads.clear();
156,157d143
< if (threadContexts.size() > 1)
< fatal("The atomic CPU only supports one thread.\n");
159,165c145,159
< if (thread->status() == ThreadContext::Active) {
< schedule(tickEvent, nextCycle());
< _status = BaseSimpleCPU::Running;
< notIdleFraction = 1;
< } else {
< _status = BaseSimpleCPU::Idle;
< notIdleFraction = 0;
---
> _status = BaseSimpleCPU::Idle;
>
> for (ThreadID tid = 0; tid < numThreads; tid++) {
> if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
> threadInfo[tid]->notIdleFraction = 1;
> activeThreads.push_back(tid);
> _status = BaseSimpleCPU::Running;
>
> // Tick if any thread is active
> if (!tickEvent.scheduled()) {
> schedule(tickEvent, nextCycle());
> }
> } else {
> threadInfo[tid]->notIdleFraction = 0;
> }
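
The drainResume() rewrite reduces to a simple pattern: tag each still-active thread, record it in activeThreads, and make sure the shared tick event gets scheduled at most once. A minimal standalone sketch of that bookkeeping, using plain C++ stand-ins rather than the gem5 ThreadContext/SimpleExecContext types:

    // Standalone model of the resume bookkeeping above; the containers are
    // ordinary C++ types, not the gem5 thread classes.
    #include <iostream>
    #include <list>
    #include <vector>

    int main()
    {
        std::vector<bool> threadActive = {true, false, true};
        std::list<int> activeThreads;
        bool running = false;
        bool tickScheduled = false;

        for (int tid = 0; tid < static_cast<int>(threadActive.size()); ++tid) {
            if (threadActive[tid]) {
                activeThreads.push_back(tid);
                running = true;
                if (!tickScheduled)      // schedule the shared tick event once
                    tickScheduled = true;
            }
        }

        std::cout << activeThreads.size() << " active, running=" << running << "\n";
    }
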
175c169
< DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
---
> DPRINTF(Drain, "tryCompleteDrain.\n");
204,207d197
<
< ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
< data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
< data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
224,225c214
< assert(thread_num == 0);
< assert(thread);
---
> assert(thread_num < numThreads);
227,231c216,218
< assert(_status == Idle);
< assert(!tickEvent.scheduled());
<
< notIdleFraction = 1;
< Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend);
---
> threadInfo[thread_num]->notIdleFraction = 1;
> Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
> threadInfo[thread_num]->thread->lastSuspend);
235,236c222,225
< //Make sure ticks are still on multiples of cycles
< schedule(tickEvent, clockEdge(Cycles(0)));
---
> if (!tickEvent.scheduled()) {
> //Make sure ticks are still on multiples of cycles
> schedule(tickEvent, clockEdge(Cycles(0)));
> }
237a227,230
> if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
> == activeThreads.end()) {
> activeThreads.push_back(thread_num);
> }
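
The guarded push_back above relies on std::find over the new activeThreads list, which needs <algorithm> to be visible in atomic.cc; the include is not part of this hunk, so treat its presence as an assumption. The idiom in isolation:

    // Insert-if-absent on a std::list, as used by activateContext() above.
    #include <algorithm>
    #include <list>

    void addIfAbsent(std::list<int> &activeThreads, int tid)
    {
        if (std::find(activeThreads.begin(), activeThreads.end(), tid) ==
            activeThreads.end()) {
            activeThreads.push_back(tid);
        }
    }
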
246,247c239,240
< assert(thread_num == 0);
< assert(thread);
---
> assert(thread_num < numThreads);
> activeThreads.remove(thread_num);
254,257c247
< // tick event may not be scheduled if this gets called from inside
< // an instruction's execution, e.g. "quiesce"
< if (tickEvent.scheduled())
< deschedule(tickEvent);
---
> threadInfo[thread_num]->notIdleFraction = 0;
259,260c249,256
< notIdleFraction = 0;
< _status = Idle;
---
> if (activeThreads.empty()) {
> _status = Idle;
>
> if (tickEvent.scheduled()) {
> deschedule(tickEvent);
> }
> }
>
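
suspendContext() now mirrors that bookkeeping: the thread is dropped from activeThreads, and the CPU only goes Idle and deschedules its tick event once no runnable thread remains. A standalone sketch of the same logic, with stand-in types rather than the gem5 ones:

    // Suspend bookkeeping: go idle only when the last active thread leaves.
    #include <list>

    struct TickModel {
        std::list<int> activeThreads;
        bool scheduled = true;
        bool idle = false;

        void suspend(int tid)
        {
            activeThreads.remove(tid);      // std::list::remove erases by value
            if (activeThreads.empty()) {
                idle = true;
                if (scheduled)
                    scheduled = false;      // models deschedule(tickEvent)
            }
        }
    };
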
272c268
< if(cpu->getAddrMonitor()->doMonitor(pkt)) {
---
> if(cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
280c276,278
< TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
---
> for (auto &t_info : cpu->threadInfo) {
> TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
> }
294c292
< if(cpu->getAddrMonitor()->doMonitor(pkt)) {
---
> if(cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
302c300,302
< TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
---
> for (auto &t_info : cpu->threadInfo) {
> TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
> }
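
Both snoop handlers now walk every hardware thread, so a snooped write clears the LL/SC reservation on each of them rather than only on the single cached thread. A simplified, self-contained model of the per-thread check (an illustration of the idea, not the TheISA::handleLockedSnoop implementation):

    // Per-thread snoop walk: drop a thread's load-linked reservation when the
    // snooped address falls in the same cache block (simplified stand-in).
    #include <cstdint>
    #include <vector>

    struct ThreadMonitor {
        bool lockFlag = false;
        uint64_t lockAddr = 0;
    };

    void handleSnoop(std::vector<ThreadMonitor> &threads,
                     uint64_t snoopAddr, uint64_t blockMask)
    {
        for (auto &t : threads) {
            if (t.lockFlag &&
                (t.lockAddr & blockMask) == (snoopAddr & blockMask))
                t.lockFlag = false;     // reservation lost to another writer
        }
    }
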
309a310,312
> SimpleExecContext& t_info = *threadInfo[curThread];
> SimpleThread* thread = t_info.thread;
>
333c336,337
< Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);
---
> Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
> BaseTLB::Read);
372a377
>
394c399,400
<
---
> SimpleExecContext& t_info = *threadInfo[curThread];
> SimpleThread* thread = t_info.thread;
427c433
< Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);
---
> Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), BaseTLB::Write);
479a486,487
>
>
505a514,526
> // Change thread if multi-threaded
> swapActiveThread();
>
> // Set memory request IDs to the current thread
> if (numThreads > 1) {
> ifetch_req.setThreadContext(_cpuId, curThread);
> data_read_req.setThreadContext(_cpuId, curThread);
> data_write_req.setThreadContext(_cpuId, curThread);
> }
>
> SimpleExecContext& t_info = *threadInfo[curThread];
> SimpleThread* thread = t_info.thread;
>
532c553
< fault = thread->itb->translateAtomic(&ifetch_req, tc,
---
> fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
568c589
< fault = curStaticInst->execute(this, traceData);
---
> fault = curStaticInst->execute(&t_info, traceData);
604c625
< if(fault != NoFault || !stayAtPC)
---
> if(fault != NoFault || !t_info.stayAtPC)
616c637
< schedule(tickEvent, curTick() + latency);
---
> reschedule(tickEvent, curTick() + latency, true);
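
With more than one thread able to restart the CPU, the tick event may already be on the queue when this point is reached, and a plain schedule() would trip its already-scheduled assertion; reschedule(event, when, true) either moves the queued event or, thanks to the final argument, schedules it fresh. A small model of that distinction (a simplified illustration of the event-queue semantics, not gem5's EventQueue):

    // schedule() vs. reschedule(..., always=true): reschedule moves an
    // already-queued event, and with always=true also queues one that is
    // not scheduled yet.
    struct Event {
        bool scheduled = false;
        long when = 0;
    };

    void reschedule(Event &e, long when, bool always)
    {
        if (e.scheduled || always) {
            e.when = when;
            e.scheduled = true;
        }
    }
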
641,643d661
< numThreads = 1;
< if (!FullSystem && workload.size() != 1)
< panic("only one workload allowed");