/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description()
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->readCpuId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
}
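        // Cache the range physical memory reports so that the access
        // functions below can send packets straight to physmem (bypassing
        // the cache ports) whenever an address falls inside it.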
    }
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    // Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

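    // These statically allocated requests are reused for every fetch, load,
    // and store this CPU performs; that is safe in atomic mode, where each
    // access completes before the next one begins.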
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    Status _status = status();
    SERIALIZE_ENUM(_status);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_ENUM(_status);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(nextCycle());
        }
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = threadContexts[0]->readCpuId();
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    // Make sure ticks are still on multiples of cycles
    tickEvent.schedule(nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);
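    // Worked example (illustrative numbers): with addr == 0x3e, sizeof(T)
    // == 4, and blockSize == 0x40, roundDown(0x41, 0x40) == 0x40 > addr, so
    // the access straddles a line boundary: the first request covers the
    // two bytes at 0x3e-0x3f, and the loop below issues a second request
    // for the remaining two bytes at 0x40-0x41.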

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLocked()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        // If there's a fault, return it
        if (fault != NoFault)
            return fault;
        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            data = gtoh(data);
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        // Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
                                       int size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    // The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int dataSize = size;

    bool firstTimeThrough = true;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize);

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;
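
    // The loop below walks the same (possibly split) sequence of requests
    // as read() above, but only performs translation; no memory is
    // accessed, and paddr is taken from the first piece.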

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        // Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        // And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}
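
// Note: the specializations above reinterpret the raw bits of the FP value
// through pointer casts rather than converting it numerically; the integer
// overload then handles byte swapping and any cache-line split.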


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }
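
            // For a failed store conditional, handleLockedWrite() returns
            // false: do_access is cleared, the memory access is skipped,
            // and the failure result reaches the ISA through the extra-data
            // path at the bottom of this block.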

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    // XXX This needs to be outside of the loop in order to
                    // work properly for cache line boundary crossing
                    // accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr) {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        // Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
                                        int size, unsigned flags)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    // The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + size - 1, blockSize);

    // The size of the data we're trying to write.
    int dataSize = size;

    bool firstTimeThrough = true;

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // If there's a fault, return it.
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        // Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        // And access the right address.
        vaddr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = ticks(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        Fault fault = setupFetchRequest(&ifetch_req);

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            // Fetch more instruction memory if necessary
            //if (predecoder.needMoreBytes())
            //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
            //}

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_stalls) {
                Tick icache_stall =
                    icache_access ? icache_latency - ticks(1) : 0;
                Tick dcache_stall =
                    dcache_access ? dcache_latency - ticks(1) : 0;
                Tick stall_cycles = (icache_stall + dcache_stall) / ticks(1);
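                // Integer division truncates, so the check below rounds any
                // fractional stall up to a whole cycle: e.g. (illustrative
                // numbers) with ticks(1) == 1000 and a combined stall of
                // 2500 ticks, stall_cycles == 2 but ticks(2) < 2500, so
                // three cycles of extra latency are charged.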
                if (ticks(stall_cycles) < (icache_stall + dcache_stall))
                    latency += ticks(stall_cycles + 1);
                else
                    latency += ticks(stall_cycles);
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = name;
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->phase = phase;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}