/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 */

#include "sim/simulate.hh"

#include <mutex>
#include <thread>

#include "base/logging.hh"
#include "base/pollevent.hh"
#include "base/types.hh"
#include "sim/async.hh"
#include "sim/eventq_impl.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/stat_control.hh"

//! Mutex for handling async events.
std::mutex asyncEventMutex;

//! Global barrier for synchronizing threads entering/exiting the
//! simulation loop.
Barrier *threadBarrier;

//! forward declaration
Event *doSimLoop(EventQueue *);

/**
 * The main function for all subordinate threads (i.e., all threads
 * other than the main thread).  These threads start by waiting on
 * threadBarrier.  Once all threads have arrived at threadBarrier,
 * they enter the simulation loop concurrently.  When they exit the
 * loop, they return to waiting on threadBarrier.  This process is
 * repeated until the simulation terminates.
 */
static void
thread_loop(EventQueue *queue)
{
    while (true) {
        threadBarrier->wait();
        doSimLoop(queue);
    }
}

GlobalSimLoopExitEvent *simulate_limit_event = nullptr;

/** Simulate for num_cycles additional ticks (despite its name, the
 * parameter is a Tick count).  If num_cycles is MaxTick (the default),
 * do not limit the simulation; some other event must terminate the
 * loop.  Exported to Python.
 * @return The GlobalSimLoopExitEvent that caused the loop to exit.
 */
GlobalSimLoopExitEvent *
simulate(Tick num_cycles)
{
    // The first time simulate() is called from the Python code, we need to
    // create a thread for each of the event queues referenced by the
    // instantiated sim objects.
    static bool threads_initialized = false;
    static std::vector<std::thread *> threads;

    if (!threads_initialized) {
        threadBarrier = new Barrier(numMainEventQueues);

        // the main thread (the one we're currently running on)
        // handles queue 0, so we only need to allocate new threads
        // for queues 1..N-1.  We'll call these the "subordinate" threads.
        for (uint32_t i = 1; i < numMainEventQueues; i++) {
            threads.push_back(new std::thread(thread_loop, mainEventQueue[i]));
        }
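        // Note that these std::thread objects are never joined; the
        // subordinate threads run thread_loop() for the lifetime of
        // the process, parking on threadBarrier between simulate()
        // calls.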

        threads_initialized = true;
        simulate_limit_event =
            new GlobalSimLoopExitEvent(mainEventQueue[0]->getCurTick(),
                                       "simulate() limit reached", 0);
    }

    inform("Entering event queue @ %d.  Starting simulation...\n", curTick());

    // Convert the relative tick count into an absolute end tick,
    // clamping at MaxTick so the unsigned counter cannot roll over.
    if (num_cycles < MaxTick - curTick())
        num_cycles = curTick() + num_cycles;
    else // counter would roll over or be set to MaxTick anyhow
        num_cycles = MaxTick;

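    // (Re)schedule the global exit event to fire once the absolute end
    // tick computed above is reached.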
    simulate_limit_event->reschedule(num_cycles);

    GlobalSyncEvent *quantum_event = nullptr;
    if (numMainEventQueues > 1) {
        if (simQuantum == 0) {
            fatal("Quantum for multi-eventq simulation not specified");
        }

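        // GlobalSyncEvent is a barrier-synchronized event that repeats
        // every simQuantum ticks; it keeps the per-queue simulated
        // times from drifting more than one quantum apart.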
        quantum_event = new GlobalSyncEvent(curTick() + simQuantum, simQuantum,
                            EventBase::Progress_Event_Pri, 0);

        inParallelMode = true;
    }

    // all subordinate (created) threads should be waiting on the
    // barrier; the arrival of the main thread here will satisfy the
    // barrier, and all threads will enter doSimLoop in parallel
    threadBarrier->wait();
    Event *local_event = doSimLoop(mainEventQueue[0]);
    assert(local_event != nullptr);

    inParallelMode = false;

    // locate the global exit event and return it to Python
    BaseGlobalEvent *global_event = local_event->globalEvent();
    assert(global_event != nullptr);

    GlobalSimLoopExitEvent *global_exit_event =
        dynamic_cast<GlobalSimLoopExitEvent *>(global_event);
    assert(global_exit_event != nullptr);

    //! Delete the simulation quantum event.
    if (quantum_event != nullptr) {
        quantum_event->deschedule();
        delete quantum_event;
    }

    return global_exit_event;
}
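
// For reference, a sketch of how this function is typically driven from
// gem5's Python layer (the m5 module wraps simulate()); exact details
// depend on the configuration script in use:
//
//     import m5
//     ...
//     exit_event = m5.simulate(1000000)  # run for 1M additional ticks
//     print('Exited @ tick', m5.curTick(),
//           'because', exit_event.getCause())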

/**
 * Test and clear the global async_event flag, such that each time the
 * flag is cleared, only one thread returns true (and thus is assigned
 * to handle the corresponding async event(s)).
 */
static bool
testAndClearAsyncEvent()
{
    // Hold asyncEventMutex for the whole test-and-clear so that at
    // most one thread can observe the flag as set.
    std::lock_guard<std::mutex> lock(asyncEventMutex);

    bool was_set = async_event;
    async_event = false;

    return was_set;
}

/**
 * The main per-thread simulation loop. This loop is executed by all
 * simulation threads (the main thread and the subordinate threads) in
 * parallel.
 */
Event *
doSimLoop(EventQueue *eventq)
{
    // set the per-thread current event queue pointer
    curEventQueue(eventq);
    eventq->handleAsyncInsertions();

    while (true) {
        // there should always be at least one event (the global exit
        // event scheduled by simulate()) in the queue
        assert(!eventq->empty());
        assert(curTick() <= eventq->nextTick() &&
               "event scheduled in the past");

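        // Cheap unsynchronized read of async_event first; if it appears
        // set, testAndClearAsyncEvent() arbitrates under the mutex so
        // that exactly one thread services the pending async event(s).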
        if (async_event && testAndClearAsyncEvent()) {
            // Take the event queue lock in case any of the service
            // routines want to schedule new events.
            std::lock_guard<EventQueue> lock(*eventq);
            if (async_statdump || async_statreset) {
                Stats::schedStatEvent(async_statdump, async_statreset);
                async_statdump = false;
                async_statreset = false;
            }

            if (async_io) {
                async_io = false;
                pollQueue.service();
            }

            if (async_exit) {
                async_exit = false;
                exitSimLoop("user interrupt received");
            }

            if (async_exception) {
                async_exception = false;
                return nullptr;
            }
        }

        Event *exit_event = eventq->serviceOne();
        if (exit_event != nullptr) {
            return exit_event;
        }
    }

    // not reached... the only exit is the return on a SimLoopExitEvent
}