atomic.cc (7678:f19b6a3a8cec)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
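    // If a physical memory port was connected, record the address range it
    // responds to; accesses that hit this range can then bypass the cache
    // ports entirely (the fast path used in tick(), readBytes() and
    // writeBytes() below).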
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


Fault
AtomicSimpleCPU::readBytes(Addr addr, uint8_t *data,
                           unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;
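
    // Worked example (hypothetical numbers): with addr == 60, size == 8 and
    // a 64-byte block, secondAddr == roundDown(67, 64) == 64 > addr, so the
    // first pass below reads the 4 bytes at 60..63 and the second pass reads
    // the remaining 4 bytes starting at address 64.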

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Advance the data pointer to the start of the second portion.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    memset(dataPtr, 0, sizeof(data));
    Fault fault = readBytes(addr, dataPtr, sizeof(data), flags);
    if (fault == NoFault) {
        data = gtoh(data);
        if (traceData)
            traceData->setData(data);
    }
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

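// The specializations below reinterpret the raw bits through the matching
// unsigned integer read, so byte swapping and tracing are handled in one
// place.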
template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


Fault
AtomicSimpleCPU::writeBytes(uint8_t *data, unsigned size,
                            Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;
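
    // Same line-splitting logic as readBytes() above: at most two passes,
    // one per cache line touched.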

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // set false to suppress the memory access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }
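
            // For a store conditional, handleLockedWrite() checks the lock
            // flag: if the SC fails, the memory access below is suppressed
            // and the failure result reaches the CPU through the request's
            // extra data, read back into *res after the access.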

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Advance the data pointer to the start of the second portion.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    if (traceData)
        traceData->setData(data);
    data = htog(data);

    Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res);
    if (fault == NoFault && data_write_req.isSwap()) {
        *res = gtoh((T)*res);
    }
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

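    // Execute up to 'width' instructions per cycle; if an LL/SC sequence
    // is in flight ('locked'), keep executing so the lock is not left held
    // across tick events.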
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (predecoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

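            // Worked example (hypothetical numbers): with ticks(1) == 500
            // and stall_ticks == 1200, stall_cycles == 2 and the aligned
            // value starts at 1000, which is then rounded up to 1500.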
            if (stall_ticks) {
                // Round the stall up to a whole number of cycles so the
                // tick event stays aligned to cycle boundaries.
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}