/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <iosfwd>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue;       // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

#ifndef SWIG
//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.

extern __thread EventQueue *_curEventQueue;

#endif

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue if one does not
//! already exist for the index, provided that the index is
//! within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }
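
// Illustrative usage sketch: a simulation thread picks up one of the main
// event queues and makes it current. The index 0 and the surrounding
// set-up are placeholders for this example only.
//
//     EventQueue *q = getEventQueue(0); // fetch (or allocate) main queue 0
//     curEventQueue(q);                 // make it current for this thread
//     assert(curEventQueue() == q);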

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead = 0x003f; // public readable flags
    static const FlagsType PublicWrite = 0x001d; // public writable flags
    static const FlagsType Squashed = 0x0001; // has been squashed
    static const FlagsType Scheduled = 0x0002; // has been scheduled
    static const FlagsType AutoDelete = 0x0004; // delete after dispatch
    /**
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * gets serialized.
     */
    static const FlagsType Reserved0 = 0x0008;
    static const FlagsType IsExitEvent = 0x0010; // special exit event
    static const FlagsType IsMainQueue = 0x0020; // on main event queue
    static const FlagsType Initialized = 0x7a40; // somewhat random bits
    static const FlagsType InitMask = 0xffc0; // mask for init bits

  public:
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /// Minimum priority
    static const Priority Minimum_Pri = SCHAR_MIN;

    /// If we enable tracing on a particular cycle, do that as the
    /// very first thing so we don't miss any of the events on
    /// that cycle (even if we enter the debugger).
    static const Priority Debug_Enable_Pri = -101;

    /// Breakpoints should happen before anything else (except
    /// enabling trace output), so we don't miss any action when
    /// debugging.
    static const Priority Debug_Break_Pri = -100;

    /// CPU switches schedule the new CPU's tick event for the
    /// same cycle (after unscheduling the old CPU's tick event).
    /// The switch needs to come before any tick events to make
    /// sure we don't tick both CPUs in the same cycle.
    static const Priority CPU_Switch_Pri = -31;

    /// For some reason "delayed" inter-cluster writebacks are
    /// scheduled before regular writebacks (which have default
    /// priority). Steve?
    static const Priority Delayed_Writeback_Pri = -1;

    /// Default is zero for historical reasons.
    static const Priority Default_Pri = 0;

    /// The DVFS update event leads to a stats dump, and is therefore
    /// given a lower priority to ensure all relevant state has been
    /// updated before the dump.
    static const Priority DVFS_Update_Pri = 31;

    /// Serialization needs to occur before tick events also, so
    /// that a serialize/unserialize is identical to an on-line
    /// CPU switch.
    static const Priority Serialize_Pri = 32;

    /// CPU ticks must come after other associated CPU events
    /// (such as writebacks).
    static const Priority CPU_Tick_Pri = 50;

    /// Statistics events (dump, reset, etc.) come after
    /// everything else, but before exit.
    static const Priority Stat_Event_Pri = 90;

    /// Progress events come at the end.
    static const Priority Progress_Event_Pri = 95;

    /// If we want to exit on this cycle, it's the very last thing
    /// we do.
    static const Priority Sim_Exit_Pri = 100;

    /// Maximum priority
    static const Priority Maximum_Pri = SCHAR_MAX;
};

/*
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the current fully linear insertion.
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    /// Accessor for flags.
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    /// Accessor for flags.
    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    // This function isn't really useful if TRACING_ON is not defined
    virtual void trace(const char *action);     //!< trace event activity

  public:

    /*
     * Event constructor
     * @param p priority of the event
     * @param f initial flags to set on the event
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     */
    virtual void process() = 0;

    /// Determine if the current event is scheduled
    bool scheduled() const { return flags.isSet(Scheduled); }

    /// Squash the current event
    void squash() { flags.set(Squashed); }

    /// Check whether the event is squashed
    bool squashed() const { return flags.isSet(Squashed); }

    /// See if this is a SimExitEvent (without resorting to RTTI)
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /// Check whether this event will auto-delete
    bool isAutoDelete() const { return flags.isSet(AutoDelete); }

    /// Get the time that the event is scheduled
    Tick when() const { return _when; }

    /// Get the event priority
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

#ifndef SWIG
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
#endif
};
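
// Illustrative usage sketch: the action of an event is defined by deriving
// from Event and overriding process(). HelloEvent and its output are
// placeholders for this example (std::cout would need <iostream>).
//
//     class HelloEvent : public Event
//     {
//       public:
//         HelloEvent() : Event(Default_Pri, AutoDelete) {}
//         void process() { std::cout << "hello at tick " << when() << "\n"; }
//         const char *description() const { return "hello event"; }
//     };
//
//     // AutoDelete frees the object after process(), so heap-allocate it:
//     curEventQueue()->schedule(new HelloEvent, curTick() + 100);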

#ifndef SWIG
inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}
inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}
#endif

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (default). This should only be
 * done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; the handler can therefore always insert events
 * into its own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the queue migration strategy above, this strategy is fully
 * deterministic. It causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged into
 * the main event queue at the end of each simulation quantum (by
 * calling the handleAsyncInsertions() method). Note that this implies
 * that such events must happen at least one simulation quantum into
 * the future, otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
 *
 * A usage sketch follows the class definition below.
 */
class EventQueue
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove event from the queue. Should only be called
    //! by thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
#ifndef SWIG
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     */
    class ScopedMigration
    {
      public:
        ScopedMigration(EventQueue *_new_eq)
            : new_eq(*_new_eq), old_eq(*curEventQueue())
        {
            old_eq.unlock();
            new_eq.lock();
            curEventQueue(&new_eq);
        }

        ~ScopedMigration()
        {
            new_eq.unlock();
            old_eq.lock();
            curEventQueue(&old_eq);
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
    };
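
    // Illustrative usage sketch: schedule an event on another thread's
    // queue by temporarily migrating to it. other_eq, ev and when are
    // placeholders for this example.
    //
    //     {
    //         EventQueue::ScopedMigration migrate(other_eq);
    //         other_eq->schedule(ev, when);
    //     } // destructor re-locks and restores the original queue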

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
     */
    class ScopedRelease
    {
      public:
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };
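
    // Illustrative usage sketch: drop the service lock while blocking so
    // another thread can temporarily take over this queue. The barrier
    // object is a placeholder for this example.
    //
    //     {
    //         EventQueue::ScopedRelease release(curEventQueue());
    //         barrier.wait();
    //     } // destructor re-acquires the lock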
#endif

    EventQueue(const std::string &n);

    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }

    //! Schedule the given event on this queue. Safe to call from any
    //! thread.
    void schedule(Event *event, Tick when, bool global = false);

    //! Deschedule the specified event. Should be called only from the
    //! owning thread.
    void deschedule(Event *event);

    //! Reschedule the specified event. Should be called only from
    //! the owning thread.
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    // process all events up to the given timestamp. we inline a
    // quick test to see if there are any events to process; if so,
    // call the internal out-of-line version to process them all.
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    // return true if no events are queued
    bool empty() const { return head == NULL; }

    void dump() const;

    bool debugVerify() const;

    //! Function for moving events from the async_queue to the main queue.
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 on top of
     * other schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that have
     * already been scheduled. Already scheduled events can be processed
     * by putting the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue() { }
};
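
// Illustrative usage sketch (referenced from the class comment above):
// synchronous and asynchronous scheduling on an EventQueue. The event
// pointers and tick values are placeholders for this example.
//
//     EventQueue q("example_queue");
//
//     // Synchronous: done by the thread servicing q, typically from an
//     // event handler while the service lock is already held.
//     q.schedule(ev1, 1000);
//
//     // Asynchronous, from another thread: 'global' = true puts the event
//     // on async_queue; it is merged into the main queue when
//     // handleAsyncInsertions() runs at the end of the simulation quantum.
//     q.schedule(ev2, 2000, true);
//
//     // Service everything scheduled up to and including tick 5000.
//     q.serviceEvents(5000);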

void dumpMainQueue();

#ifndef SWIG
class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}

    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
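
// Illustrative usage sketch: an object that inherits from EventManager can
// schedule its own events on the queue it was constructed with. Widget and
// its tick() member are placeholders for this example.
//
//     class Widget : public EventManager
//     {
//       public:
//         Widget(EventQueue *q) : EventManager(q), tickEvent(this) {}
//         const std::string name() const { return "widget"; }
//         void tick() { /* periodic work */ }
//         void startAt(Tick t) { schedule(tickEvent, t); }
//       private:
//         EventWrapper<Widget, &Widget::tick> tickEvent;
//     };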

template <class T, void (T::* F)()>
void
DelayFunction(EventQueue *eventq, Tick when, T *object)
{
    class DelayEvent : public Event
    {
      private:
        T *object;

      public:
        DelayEvent(T *o)
            : Event(Default_Pri, AutoDelete), object(o)
        { }
        void process() { (object->*F)(); }
        const char *description() const { return "delay"; }
    };

    eventq->schedule(new DelayEvent(object), when);
}
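
// Illustrative usage sketch: run a member function once after a delay.
// Widget and its doCleanup() member are placeholders for this example.
//
//     // Call widget->doCleanup() when the queue reaches curTick() + 1000.
//     DelayFunction<Widget, &Widget::doCleanup>(eventq, curTick() + 1000,
//                                               widget);
//
// The heap-allocated DelayEvent carries the AutoDelete flag, so the queue
// frees it after it has been processed.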

template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
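
// Illustrative usage sketch: wrap a member function as a statically
// allocated event. ExampleDevice and processRetry() are placeholders for
// this example.
//
//     class ExampleDevice
//     {
//       public:
//         ExampleDevice() : retryEvent(this) {}
//         const std::string name() const { return "example_device"; }
//         void processRetry() { /* retry the stalled request */ }
//         void requestRetry(EventQueue *q, Tick when)
//         {
//             if (!retryEvent.scheduled())
//                 q->schedule(&retryEvent, when);
//         }
//       private:
//         EventWrapper<ExampleDevice, &ExampleDevice::processRetry> retryEvent;
//     };
//
// EventWrapper::name() calls object->name(), so the wrapped class should
// provide a name(), as a SimObject normally does.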
#endif

#endif // __SIM_EVENTQ_HH__