old ( 12276:22c220be30c5 ) new ( 12284:b91c036913da )
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req.setContext(cid);
    data_read_req.setContext(cid);
    data_write_req.setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule the power gating event, if one is scheduled
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule the power gating event, if needed
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When running without caches, invalidation packets will not be
    // received; we must therefore also treat incoming writes as
    // invalidations and wake up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;
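    // Worked example (assuming 64-byte cache lines): for addr = 0x3c and
    // size = 8, roundDown(0x43, 64) == 0x40 > addr, so the first access is
    // trimmed to the 4 bytes at 0x3c-0x3f and the loop below issues a
    // second access for the remaining 4 bytes starting at 0x40.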

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
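                // With fastmem enabled, addresses backed by physical memory
                // are accessed directly (no packet is sent through the data
                // port and no latency is accumulated); everything else still
                // goes out over dcachePort.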
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

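            // For a load-linked (LL) access, record the locked address in
            // the thread context so that a later store-conditional can be
            // checked against it (ISA-specific bookkeeping).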
            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true; // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

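            // If handleLockedWrite() reports that the store-conditional
            // fails locally (do_access == false), the memory access is
            // skipped and the ISA-defined failure result is handed back to
            // the caller through the request's extra data via *res below.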
            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to the current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req.setContext(cid);
        data_read_req.setContext(cid);
        data_write_req.setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

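    // Execute up to 'width' instructions per scheduled tick; keep going
    // past 'width' while 'locked' is set so that a locked RMW sequence
    // (e.g. an x86 locked instruction) completes within a single tick.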
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just been suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
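                // e.g. with a 1000-tick (1 ns) clock period, a 1500-tick
                // stall is charged as divCeil(1500, 1000) * 1000 = 2000
                // ticks, i.e. two full cycles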
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

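    // ppCommit is notified once per committed instruction (see tick());
    // probe listeners, e.g. SimPoint basic-block profiling, can attach to
    // the "Commit" probe point by name.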
    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}