/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    if (!params()->defer_registration &&
        system->getMemoryMode() != Enums::atomic) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->defer_registration) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    Drainable::State so_state(getDrainState());
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    Drainable::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

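// The atomic CPU completes every memory access synchronously inside
// tick(), so there are never transactions in flight to wait for; drain()
// can therefore mark itself drained immediately (the return value of 0
// indicates that nothing is left to drain).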
unsigned int
AtomicSimpleCPU::drain(DrainManager *drain_manager)
{
    setDrainState(Drainable::Drained);
    return 0;
}

void
AtomicSimpleCPU::drainResume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    if (system->getMemoryMode() != Enums::atomic) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }

    setDrainState(Drainable::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
    system->totalNumInsts = 0;
}

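// switchOut()/takeOverFrom() support CPU model switching, e.g. warming up
// or fast-forwarding a simulation with the atomic CPU and then handing its
// thread contexts over to a more detailed CPU model (or vice versa).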
void
AtomicSimpleCPU::switchOut()
{
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active &&
            _status != BaseSimpleCPU::Running) {
            _status = BaseSimpleCPU::Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != BaseSimpleCPU::Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

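    // This loop runs once for an access that fits within a single cache
    // block and twice when the access straddles a block boundary: the
    // first pass covers the bytes up to the boundary, the second pass
    // (with addr = secondAddr) covers the rest. For example, assuming a
    // hypothetical 64-byte block size, an 8-byte read at address 0x3C
    // gives secondAddr = 0x40, so the first iteration reads 4 bytes at
    // 0x3C and the second reads the remaining 4 bytes at 0x40.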
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

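            // For store-conditionals (and other non-swap accesses that
            // report a result), the outcome is conveyed through the
            // request's extra data; copy it back into *res so the ISA code
            // that issued the access can observe, e.g., SC success or
            // failure.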
            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

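    // Execute up to 'width' instructions in this tick.  The '|| locked'
    // clause keeps iterating past the width limit while an LL/SC lock flag
    // is held, so a load-locked/store-conditional sequence can complete
    // without being split across tick events.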
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle)
            return;

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

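            // Example of the rounding done below (assuming an illustrative
            // 1000-tick clock period): a combined stall of 2500 ticks is
            // charged as divCeil(2500, 1000) * 1000 = 3000 ticks.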
            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}