eventq.hh (10991:72781d410e48 -> 10992:c88952d67db2; revision 10992, shown below, adds Event::isAutoDelete())
/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <iosfwd>
#include <memory>
#include <mutex>
#include <string>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue; // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks away in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

#ifndef SWIG
//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.

extern __thread EventQueue *_curEventQueue;

#endif

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue in case one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead = 0x003f; // public readable flags
    static const FlagsType PublicWrite = 0x001d; // public writable flags
    static const FlagsType Squashed = 0x0001; // has been squashed
    static const FlagsType Scheduled = 0x0002; // has been scheduled
    static const FlagsType AutoDelete = 0x0004; // delete after dispatch
    static const FlagsType AutoSerialize = 0x0008; // must be serialized
    static const FlagsType IsExitEvent = 0x0010; // special exit event
    static const FlagsType IsMainQueue = 0x0020; // on main event queue
    static const FlagsType Initialized = 0x7a40; // somewhat random bits
    static const FlagsType InitMask = 0xffc0; // mask for init bits

  public:
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /// Minimum priority
    static const Priority Minimum_Pri = SCHAR_MIN;

    /// If we enable tracing on a particular cycle, do that as the
    /// very first thing so we don't miss any of the events on
    /// that cycle (even if we enter the debugger).
    static const Priority Debug_Enable_Pri = -101;

    /// Breakpoints should happen before anything else (except
    /// enabling trace output), so we don't miss any action when
    /// debugging.
    static const Priority Debug_Break_Pri = -100;

    /// CPU switches schedule the new CPU's tick event for the
    /// same cycle (after unscheduling the old CPU's tick event).
    /// The switch needs to come before any tick events to make
    /// sure we don't tick both CPUs in the same cycle.
    static const Priority CPU_Switch_Pri = -31;

    /// For some reason "delayed" inter-cluster writebacks are
    /// scheduled before regular writebacks (which have default
    /// priority). Steve?
    static const Priority Delayed_Writeback_Pri = -1;

    /// Default is zero for historical reasons.
    static const Priority Default_Pri = 0;

    /// A DVFS update event leads to a stats dump and is therefore given
    /// a lower priority to ensure all relevant state has been updated.
    static const Priority DVFS_Update_Pri = 31;

    /// Serialization needs to occur before tick events also, so
    /// that a serialize/unserialize is identical to an on-line
    /// CPU switch.
    static const Priority Serialize_Pri = 32;

    /// CPU ticks must come after other associated CPU events
    /// (such as writebacks).
    static const Priority CPU_Tick_Pri = 50;

    /// Statistics events (dump, reset, etc.) come after
    /// everything else, but before exit.
    static const Priority Stat_Event_Pri = 90;

    /// Progress events come at the end.
    static const Priority Progress_Event_Pri = 95;

    /// If we want to exit on this cycle, it's the very last thing
    /// we do.
    static const Priority Sim_Exit_Pri = 100;

    /// Maximum priority
    static const Priority Maximum_Pri = SCHAR_MAX;
};

/*
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is used to find the bin, where a bin is defined
    // as when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the current fully linear insertion.
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    /// Accessor for flags.
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    /// Accessor for flags.
    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    // This function isn't really useful if TRACING_ON is not defined
    virtual void trace(const char *action); //!< trace event activity

  public:

    /*
     * Event constructor
     * @param p priority of the event
     * @param f initial event flags (must be publicly writable flags)
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     */
    virtual void process() = 0;

    /// Determine if the current event is scheduled
    bool scheduled() const { return flags.isSet(Scheduled); }

    /// Squash the current event
    void squash() { flags.set(Squashed); }

    /// Check whether the event is squashed
    bool squashed() const { return flags.isSet(Squashed); }

    /// See if this is a SimExitEvent (without resorting to RTTI)
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /// Check whether this event will auto-delete
    bool isAutoDelete() const { return flags.isSet(AutoDelete); }

    /// Get the time that the event is scheduled
    Tick when() const { return _when; }

    /// Get the event priority
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

#ifndef SWIG
    void serialize(CheckpointOut &cp) const M5_ATTR_OVERRIDE;
    void unserialize(CheckpointIn &cp) M5_ATTR_OVERRIDE;
#endif
};
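
/**
 * Illustrative sketch (not part of the original header): the usual way to
 * create a concrete event is to derive from Event and override process().
 * CounterBumpEvent and its counter pointer are hypothetical names used
 * only for this example.
 *
 * @code
 * class CounterBumpEvent : public Event
 * {
 *   private:
 *     uint64_t *counter;
 *
 *   public:
 *     // AutoDelete makes the queue delete the object after dispatch,
 *     // so instances are expected to be heap allocated.
 *     CounterBumpEvent(uint64_t *c)
 *         : Event(Default_Pri, AutoDelete), counter(c)
 *     { }
 *
 *     void process() { ++(*counter); }
 *     const char *description() const { return "counter bump event"; }
 * };
 *
 * // e.g.: eventq->schedule(new CounterBumpEvent(&count), when);
 * @endcode
 */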

#ifndef SWIG
inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}
#endif

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (default). This should only be
 * done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the previous queue migration strategy, this strategy is fully
 * deterministic. This causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged into
 * the main event queue at the end of each simulation quantum (by
 * calling the handleAsyncInsertions() method). Note that this implies
 * that such events must happen at least one simulation quantum into
 * the future, otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
 */
class EventQueue : public Serializable
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove an event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are moved to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
#ifndef SWIG
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     */
    class ScopedMigration
    {
      public:
        ScopedMigration(EventQueue *_new_eq)
            : new_eq(*_new_eq), old_eq(*curEventQueue())
        {
            old_eq.unlock();
            new_eq.lock();
            curEventQueue(&new_eq);
        }

        ~ScopedMigration()
        {
            new_eq.unlock();
            old_eq.lock();
            curEventQueue(&old_eq);
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
    };
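
    /**
     * Illustrative sketch (not part of the original header): one way a
     * ScopedMigration might be used to schedule an event on another
     * queue without taking its lock directly. The names other_eq,
     * dev_event and when are hypothetical.
     *
     * @code
     * {
     *     // Unlocks the current queue, locks other_eq and makes it current.
     *     EventQueue::ScopedMigration migrate(other_eq);
     *     other_eq->schedule(dev_event, when);
     * }   // The destructor migrates back to the original queue.
     * @endcode
     */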

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
     */
    class ScopedRelease
    {
      public:
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };
#endif

    EventQueue(const std::string &n);

    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }

    //! Schedule the given event on this queue. Safe to call from any
    //! thread.
    void schedule(Event *event, Tick when, bool global = false);

    //! Deschedule the specified event. Should be called only from the
    //! owning thread.
    void deschedule(Event *event);

    //! Reschedule the specified event. Should be called only from
    //! the owning thread.
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }
    Tick getCurTick() { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    // process all events up to the given timestamp. we inline a
    // quick test to see if there are any events to process; if so,
    // call the internal out-of-line version to process them all.
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    // return true if no events are queued
    bool empty() const { return head == NULL; }

    void dump() const;

    bool debugVerify() const;

    //! Function for moving events from the async_queue to the main queue.
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 on top of
     * other schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that
     * have already been scheduled. The already scheduled events can be
     * processed later by putting the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

#ifndef SWIG
    void serialize(CheckpointOut &cp) const M5_ATTR_OVERRIDE;
    void unserialize(CheckpointIn &cp) M5_ATTR_OVERRIDE;
#endif

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue() { }
};
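
/**
 * Illustrative sketch (not part of the original header): driving an event
 * queue by hand in a single-threaded setting. Normally the simulate() loop
 * does this; ev here stands for some hypothetical Event subclass instance.
 *
 * @code
 * EventQueue *q = getEventQueue(0);   // main event queue 0
 * q->schedule(&ev, 1000);             // process ev at tick 1000
 * q->serviceEvents(2000);             // run all events up to tick 2000
 * @endcode
 */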

void dumpMainQueue();

#ifndef SWIG
class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}

    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
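
/**
 * Illustrative sketch (not part of the original header): objects usually
 * hold or inherit an EventManager and let it forward scheduling calls to
 * their event queue. The names eq and my_event are hypothetical.
 *
 * @code
 * EventManager em(eq);                        // eq is some EventQueue *
 * Tick now = em.eventQueue()->getCurTick();
 * em.schedule(my_event, now + 100);
 * if (my_event.scheduled())
 *     em.reschedule(my_event, now + 200);
 * @endcode
 */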

template <class T, void (T::* F)()>
void
DelayFunction(EventQueue *eventq, Tick when, T *object)
{
    class DelayEvent : public Event
    {
      private:
        T *object;

      public:
        DelayEvent(T *o)
            : Event(Default_Pri, AutoDelete), object(o)
        { }
        void process() { (object->*F)(); }
        const char *description() const { return "delay"; }
    };

    eventq->schedule(new DelayEvent(object), when);
}
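
/**
 * Illustrative sketch (not part of the original header): DelayFunction
 * schedules a call to a member function at a future tick. MyDevice and
 * its drainDone() member are hypothetical.
 *
 * @code
 * // Call dev->drainDone() at tick `when` on the given event queue.
 * DelayFunction<MyDevice, &MyDevice::drainDone>(eventq, when, dev);
 * @endcode
 */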

template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
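
/**
 * Illustrative sketch (not part of the original header): EventWrapper turns
 * an existing member function into an event without writing a dedicated
 * Event subclass. Widget and its tick() member are hypothetical.
 *
 * @code
 * class Widget : public EventManager
 * {
 *   public:
 *     Widget(EventQueue *eq) : EventManager(eq), tickEvent(this) {}
 *
 *     // Does some periodic work and reschedules itself 500 ticks later.
 *     void tick() { schedule(tickEvent, eventQueue()->getCurTick() + 500); }
 *
 *   private:
 *     EventWrapper<Widget, &Widget::tick> tickEvent;
 * };
 * @endcode
 */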
#endif

#endif // __SIM_EVENTQ_HH__