/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}
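
// drain() above implements this CPU's side of gem5's drain handshake: the
// return value is the number of outstanding requests that will signal
// completion later (0 or 1 for this CPU). A minimal sketch of the caller's
// side of that contract, illustrative only rather than how the simulator
// actually drives it:
//
//     DrainManager dm;
//     if (cpu->drain(&dm) == 0) {
//         // Already quiescent: safe to checkpoint or switch out now.
//     } else {
//         // Wait; the CPU calls dm.signalDrainDone() from
//         // tryCompleteDrain() once it reaches a clean instruction
//         // boundary (see isDrained()).
//     }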

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(Cycles(0)));
    _status = BaseSimpleCPU::Running;
}
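
// A worked example of the idle-cycle accounting in activateContext() above
// (a sketch, assuming a 1 ns clock period, i.e. 1000 ticks per cycle at the
// default 1 ps tick): if the thread suspended at tick 5000 and is
// reactivated at tick 9000, lastActivate - lastSuspend = 4000 ticks,
// ticksToCycles() yields 4, and numCycles advances by 4 so that it stays
// consistent with wall-clock time across the idle interval.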


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;
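
    // Worked example of the split computation above (a sketch, assuming a
    // 64-byte cache line): a 16-byte read at addr 0x78 ends at 0x87, so
    // secondAddr = roundDown(0x87, 64) = 0x80 > addr. The first iteration
    // of the loop below therefore reads size = 0x80 - 0x78 = 8 bytes, and
    // the loop repeats once for the remaining 16 - 8 = 8 bytes at 0x80.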

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{

    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }
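
    // A null data pointer with the CACHE_BLOCK_ZERO flag is how
    // block-zeroing operations reach this CPU: the caller asks for a whole
    // line of zeros without supplying a buffer, and the static zero_array
    // above is substituted. A hypothetical 64-byte request on a system with
    // 64-byte lines would look like:
    //
    //     writeMem(NULL, 64, line_addr, Request::CACHE_BLOCK_ZERO, NULL);
    //
    // where line_addr is a cache-line-aligned address.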

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }
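
            // LL/SC note: a store-conditional (an isLLSC() store) only
            // accesses memory if TheISA::handleLockedWrite() above finds
            // that the reservation established by the matching load-locked
            // (TheISA::handleLockedRead() in readMem()) is still intact;
            // otherwise do_access is cleared and the store fails without
            // touching memory. The success/failure result reaches the
            // instruction through the extra-data path below (*res).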

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

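    // Execute up to 'width' instructions per tick event. The '|| locked'
    // term keeps the loop running past that limit while a locked RMW
    // sequence is in flight (locked is set in readMem() and cleared in
    // writeMem()), so the read-modify-write pair is never split across
    // tick events.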
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }
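
            // Worked example of the rounding above (a sketch, assuming a
            // 500-tick clock period): stall_ticks = 1200 gives
            // divCeil(1200, 500) = 3, so latency grows by 3 * 500 = 1500
            // ticks; stalls are accumulated in ticks but always charged in
            // whole clock periods.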

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}