eventq.hh revision 10476:f058e09b7d69
/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <iosfwd>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue;       // forward declaration
class BaseGlobalEvent;

//! Simulation quantum for multiple-eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A that is generated by an event on
//! Queue B should be at least simQuantum ticks in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

#ifndef SWIG
//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.

extern __thread EventQueue *_curEventQueue;

#endif

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue if one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions.  This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead    = 0x003f; // public readable flags
    static const FlagsType PublicWrite   = 0x001d; // public writable flags
    static const FlagsType Squashed      = 0x0001; // has been squashed
    static const FlagsType Scheduled     = 0x0002; // has been scheduled
    static const FlagsType AutoDelete    = 0x0004; // delete after dispatch
    static const FlagsType AutoSerialize = 0x0008; // must be serialized
    static const FlagsType IsExitEvent   = 0x0010; // special exit event
    static const FlagsType IsMainQueue   = 0x0020; // on main event queue
    static const FlagsType Initialized   = 0x7a40; // somewhat random bits
    static const FlagsType InitMask      = 0xffc0; // mask for init bits

  public:
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle.  Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /// Minimum priority
    static const Priority Minimum_Pri =          SCHAR_MIN;

    /// If we enable tracing on a particular cycle, do that as the
    /// very first thing so we don't miss any of the events on
    /// that cycle (even if we enter the debugger).
    static const Priority Debug_Enable_Pri =          -101;

    /// Breakpoints should happen before anything else (except
    /// enabling trace output), so we don't miss any action when
    /// debugging.
    static const Priority Debug_Break_Pri =           -100;

    /// CPU switches schedule the new CPU's tick event for the
    /// same cycle (after unscheduling the old CPU's tick event).
    /// The switch needs to come before any tick events to make
    /// sure we don't tick both CPUs in the same cycle.
    static const Priority CPU_Switch_Pri =             -31;

    /// For some reason "delayed" inter-cluster writebacks are
    /// scheduled before regular writebacks (which have default
    /// priority).  Steve?
    static const Priority Delayed_Writeback_Pri =       -1;

    /// Default is zero for historical reasons.
    static const Priority Default_Pri =                  0;

    /// The DVFS update event triggers a stats dump, so it is given a
    /// lower priority (it is processed later within the same tick) to
    /// ensure all relevant state has been updated beforehand.
    static const Priority DVFS_Update_Pri =             31;

    /// Serialization needs to occur before tick events also, so
    /// that a serialize/unserialize is identical to an on-line
    /// CPU switch.
    static const Priority Serialize_Pri =               32;

    /// CPU ticks must come after other associated CPU events
    /// (such as writebacks).
    static const Priority CPU_Tick_Pri =                50;

    /// Statistics events (dump, reset, etc.) come after
    /// everything else, but before exit.
    static const Priority Stat_Event_Pri =              90;

    /// Progress events come at the end.
    static const Priority Progress_Event_Pri =          95;

    /// If we want to exit on this cycle, it's the very last thing
    /// we do.
    static const Priority Sim_Exit_Pri =               100;

    /// Maximum priority
    static const Priority Maximum_Pri =          SCHAR_MAX;
};

/*
 * An item on an event queue.  The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists.  The
    // 'nextBin' pointer is used to find the bin, where a bin is
    // defined as when+priority.  All events in the same bin are
    // stored in a second linked list (a stack) maintained by the
    // 'nextInBin' pointer.  The list is accessed in LIFO order.  The
    // end result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant.  Hopefully this is a significant improvement
    // over the current fully linear insertion.
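    //
    // A rough sketch (illustrative only): assume events A and B share
    // the bin (when=100, pri=0), with B inserted after A, and event C
    // is alone in the bin (when=200, pri=0).  Then:
    //
    //   head -> B --nextInBin--> A       // bin (100, 0), LIFO stack
    //           |
    //           +--nextBin--> C          // bin (200, 0)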
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID.  We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult.  Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return this && (flags & InitMask) == Initialized;
    }

  protected:
    /// Accessor for flags.
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    /// Accessor for flags.
    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    // This function isn't really useful if TRACING_ON is not defined
    virtual void trace(const char *action);     //!< trace event activity

  public:

    /*
     * Event constructor
     * @param p priority of the event (default: Default_Pri)
     * @param f initial (public writable) flags for the event
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event.  This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs).  There is no default implementation; each subclass
     * must provide its own implementation.  The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.  See the usage sketch after this class for a
     * minimal example of a concrete event.
     */
    virtual void process() = 0;

    /// Determine if the current event is scheduled
    bool scheduled() const { return flags.isSet(Scheduled); }

    /// Squash the current event
    void squash() { flags.set(Squashed); }

    /// Check whether the event is squashed
    bool squashed() const { return flags.isSet(Squashed); }

    /// See if this is a SimExitEvent (without resorting to RTTI)
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /// Get the time that the event is scheduled
    Tick when() const { return _when; }

    /// Get the event priority
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event.  By default, there is no GlobalEvent, so return
    //! NULL.  (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

#ifndef SWIG
    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);

    //! This function is required to support restoring from checkpoints
    //! when running with multiple queues. Since we still have not thrashed
    //! out all the details of checkpointing, this function is most likely
    //! to be revisited in the future.
    virtual void unserialize(Checkpoint *cp, const std::string &section,
                     EventQueue *eventq);
#endif
};
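
/*
 * A minimal sketch of a concrete event (illustrative only; HelloEvent,
 * doWork() and the tick arithmetic are hypothetical, not part of this
 * header):
 *
 *   class HelloEvent : public Event
 *   {
 *     public:
 *       HelloEvent() : Event(Default_Pri) {}
 *       void process() { doWork(); }  // doWork() is a placeholder
 *       const char *description() const { return "hello"; }
 *   };
 *
 * The event would typically be scheduled on a queue at an absolute
 * tick, e.g. eventq->schedule(&helloEvent, curTick() + 1000).
 */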

#ifndef SWIG
inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}
#endif

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method inserts either a <i>synchronous</i>
 * or an <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (the default). This should only
 * be done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the queue migration strategy above, this strategy is fully
 * deterministic. It causes the event to be inserted into a separate
 * queue of asynchronous events (async_queue), which is merged into
 * the main event queue at the end of each simulation quantum (by
 * calling the handleAsyncInsertions() method). Note that this implies
 * that such events must happen at least one simulation quantum into
 * the future; otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
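 *
 * A minimal usage sketch (illustrative only; 'ev' and the tick values
 * are hypothetical):
 *
 *   // From the thread owning the queue (the service lock is already
 *   // held while an event handler is running):
 *   eventq->schedule(ev, curTick() + 1000);
 *
 *   // From another thread: schedule asynchronously, at least one
 *   // simulation quantum into the future.
 *   eventq->schedule(ev, curTick() + simQuantum, true);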
 */
class EventQueue : public Serializable
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled in
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove an event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are moved to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
#ifndef SWIG
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
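     *
     * A usage sketch (illustrative only; 'otherQueue' is hypothetical):
     *
     *   {
     *       EventQueue::ScopedMigration migrate(otherQueue);
     *       // curEventQueue() now refers to otherQueue, which is
     *       // locked; events can be scheduled on it here.
     *   } // destructor re-locks and restores the original queue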
     */
    class ScopedMigration
    {
      public:
        ScopedMigration(EventQueue *_new_eq)
            :  new_eq(*_new_eq), old_eq(*curEventQueue())
        {
            old_eq.unlock();
            new_eq.lock();
            curEventQueue(&new_eq);
        }

        ~ScopedMigration()
        {
            new_eq.unlock();
            old_eq.lock();
            curEventQueue(&old_eq);
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
    };

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
     */
    class ScopedRelease
    {
      public:
        ScopedRelease(EventQueue *_eq)
            :  eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };
#endif

    EventQueue(const std::string &n);

    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }

    //! Schedule the given event on this queue. Safe to call from any
    //! thread.
    void schedule(Event *event, Tick when, bool global = false);

    //! Deschedule the specified event. Should be called only from the
    //! owning thread.
    void deschedule(Event *event);

    //! Reschedule the specified event. Should be called only from
    //! the owning thread.
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }
    Tick getCurTick() { return _curTick; }

    Event *serviceOne();

    // process all events up to the given timestamp.  we inline a
    // quick test to see if there are any events to process; if so,
    // call the internal out-of-line version to process them all.
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher.  I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    // return true if no events are queued
    bool empty() const { return head == NULL; }

    void dump() const;

    bool debugVerify() const;

    //! Function for moving events from the async_queue to the main queue.
    void handleAsyncInsertions();

    /**
     *  Function to signal that the event loop should be woken up because
     *  an event has been scheduled by an agent outside the gem5 event
     *  loop(s) whose event insertion may not have been noticed by gem5.
     *  This function isn't needed by the usual gem5 event loop but may
     *  be necessary in derived EventQueues which host gem5 onto other
     *  schedulers.
     *
     *  @param when Time of a delayed wakeup (if known). This parameter
     *  can be used by an implementation to schedule a wakeup in the
     *  future if it is sure it will remain active until then.
     *  Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     *  Function for replacing the head of the event queue, so that a
     *  different set of events can run without disturbing events that have
     *  already been scheduled. Already scheduled events can be processed
     *  by replacing the original head back.
     *  USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     *  NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

#ifndef SWIG
    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
#endif

    virtual ~EventQueue() { }
};

void dumpMainQueue();

#ifndef SWIG
class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}

    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
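
/*
 * A minimal usage sketch (illustrative only; MyObject and its tick()
 * method are hypothetical): a class derives from EventManager, wraps a
 * member function in an EventWrapper (declared below), and schedules
 * it on its own queue:
 *
 *   class MyObject : public EventManager
 *   {
 *       EventWrapper<MyObject, &MyObject::tick> tickEvent;
 *     public:
 *       MyObject(EventQueue *q) : EventManager(q), tickEvent(this) {}
 *       void tick() { schedule(tickEvent, curTick() + 1000); }
 *   };
 */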

template <class T, void (T::* F)()>
void
DelayFunction(EventQueue *eventq, Tick when, T *object)
{
    class DelayEvent : public Event
    {
      private:
        T *object;

      public:
        DelayEvent(T *o)
            : Event(Default_Pri, AutoDelete), object(o)
        { }
        void process() { (object->*F)(); }
        const char *description() const { return "delay"; }
    };

    eventq->schedule(new DelayEvent(object), when);
}
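
/*
 * A hypothetical usage sketch: run MyObject::cleanup on 'obj' at tick
 * 'when' without writing a dedicated event class (the event deletes
 * itself after dispatch via AutoDelete); all names are placeholders:
 *
 *   DelayFunction<MyObject, &MyObject::cleanup>(eventq, when, obj);
 */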

template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
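
/*
 * A hypothetical usage sketch: wrap MyObject::tick in an event and
 * schedule it on a queue at an absolute tick ('myObj', 'eventq' and
 * 'delay' are placeholders):
 *
 *   EventWrapper<MyObject, &MyObject::tick> tickEvent(myObj);
 *   eventq->schedule(&tickEvent, curTick() + delay);
 */
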
#endif

#endif // __SIM_EVENTQ_HH__