/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <functional>
#include <iosfwd>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "base/flags.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue;       // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A which is generated by an event on
//! Queue B should be at least simQuantum ticks away in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.

extern __thread EventQueue *_curEventQueue;

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue in case one
//! does not exist for the index, provided that the index
//! is within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions.  This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead    = 0x003f; // public readable flags
    static const FlagsType PublicWrite   = 0x001d; // public writable flags
    static const FlagsType Squashed      = 0x0001; // has been squashed
    static const FlagsType Scheduled     = 0x0002; // has been scheduled
    static const FlagsType Managed       = 0x0004; // Use life cycle manager
    static const FlagsType AutoDelete    = Managed; // delete after dispatch
    /**
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * gets serialized.
     */
    static const FlagsType Reserved0     = 0x0008;
    static const FlagsType IsExitEvent   = 0x0010; // special exit event
    static const FlagsType IsMainQueue   = 0x0020; // on main event queue
    static const FlagsType Initialized   = 0x7a40; // somewhat random bits
    static const FlagsType InitMask      = 0xffc0; // mask for init bits

  public:
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle.  Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /// Minimum priority
    static const Priority Minimum_Pri =          SCHAR_MIN;

    /// If we enable tracing on a particular cycle, do that as the
    /// very first thing so we don't miss any of the events on
    /// that cycle (even if we enter the debugger).
    static const Priority Debug_Enable_Pri =          -101;

    /// Breakpoints should happen before anything else (except
    /// enabling trace output), so we don't miss any action when
    /// debugging.
    static const Priority Debug_Break_Pri =           -100;

    /// CPU switches schedule the new CPU's tick event for the
    /// same cycle (after unscheduling the old CPU's tick event).
    /// The switch needs to come before any tick events to make
    /// sure we don't tick both CPUs in the same cycle.
    static const Priority CPU_Switch_Pri =             -31;

    /// For some reason "delayed" inter-cluster writebacks are
    /// scheduled before regular writebacks (which have default
    /// priority).  Steve?
    static const Priority Delayed_Writeback_Pri =       -1;

    /// Default is zero for historical reasons.
    static const Priority Default_Pri =                  0;

    /// A DVFS update event leads to a stats dump, so it is given a lower
    /// priority to ensure all relevant state has been updated first.
    static const Priority DVFS_Update_Pri =             31;

    /// Serialization needs to occur before tick events also, so
    /// that a serialize/unserialize is identical to an on-line
    /// CPU switch.
    static const Priority Serialize_Pri =               32;

    /// CPU ticks must come after other associated CPU events
    /// (such as writebacks).
    static const Priority CPU_Tick_Pri =                50;

    /// If we want to exit a thread in a CPU, it comes after CPU_Tick_Pri
    static const Priority CPU_Exit_Pri =                64;

    /// Statistics events (dump, reset, etc.) come after
    /// everything else, but before exit.
    static const Priority Stat_Event_Pri =              90;

    /// Progress events come at the end.
    static const Priority Progress_Event_Pri =          95;

    /// If we want to exit on this cycle, it's the very last thing
    /// we do.
    static const Priority Sim_Exit_Pri =               100;

    /// Maximum priority
    static const Priority Maximum_Pri =          SCHAR_MAX;
};

/*
 * An item on an event queue.  The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
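 *
 * As a minimal illustrative sketch (MyEvent and its members are
 * hypothetical names, not part of this file), a subclass typically
 * looks like:
 *
 *     class MyEvent : public Event
 *     {
 *       public:
 *         MyEvent() : Event(Default_Pri) {}
 *         void process() override { doWork(); }  // doWork() is assumed
 *         const char *description() const override { return "MyEvent"; }
 *     };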
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists.  The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority.  All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer.  The list will be accessed in LIFO order.  The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant.  Hopefully this is a significant improvement
    // over the current fully linear insertion.
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID.  We can also use pointer values for
    /// this but they're not consistent across runs making debugging
    /// more difficult.  Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    /// Accessor for flags.
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    /// Accessor for flags.
    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    // This function isn't really useful if TRACING_ON is not defined
    virtual void trace(const char *action);     //!< trace event activity

  protected: /* Memory management */
    /**
     * @{
     * Memory management hooks for events that have the Managed flag set
     *
     * Events can use automatic memory management by setting the
     * Managed flag. The default implementation automatically deletes
     * events once they have been removed from the event queue. This
     * typically happens when events are descheduled or have been
     * triggered and not rescheduled.
     *
     * The methods below may be overridden by events that need custom
     * memory management. For example, events exported to Python need
     * to implement reference counting to ensure that the Python
     * implementation of the event is kept alive while it lives in the
     * event queue.
     *
     * @note Memory managers are responsible for implementing
     * reference counting (by overriding both acquireImpl() and
     * releaseImpl()) or checking if an event is no longer scheduled
     * in releaseImpl() before deallocating it.
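     *
     * As a rough, hypothetical sketch of that pairing (the refcount
     * member is illustrative only and does not exist in this class):
     *
     *     void acquireImpl() override { ++refcount; }
     *     void releaseImpl() override
     *     {
     *         if (--refcount == 0)
     *             delete this;
     *     }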
     */

    /**
     * Managed event scheduled and being held in the event queue.
     */
    void acquire()
    {
        if (flags.isSet(Event::Managed))
            acquireImpl();
    }

    /**
     * Managed event removed from the event queue.
     */
    void release() {
        if (flags.isSet(Event::Managed))
            releaseImpl();
    }

    virtual void acquireImpl() {}

    virtual void releaseImpl() {
        if (!scheduled())
            delete this;
    }

    /** @} */

  public:

    /*
     * Event constructor
     * @param queue that the event gets scheduled on
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event.  This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs).  There is no default implementation; each subclass
     * must provide its own implementation.  The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     */
    virtual void process() = 0;

    /// Determine if the current event is scheduled
    bool scheduled() const { return flags.isSet(Scheduled); }

    /// Squash the current event
    void squash() { flags.set(Squashed); }

    /// Check whether the event is squashed
    bool squashed() const { return flags.isSet(Squashed); }

    /// See if this is a SimExitEvent (without resorting to RTTI)
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /// Check whether this event will auto-delete
    bool isManaged() const { return flags.isSet(Managed); }
    bool isAutoDelete() const { return isManaged(); }

    /// Get the time that the event is scheduled
    Tick when() const { return _when; }

    /// Get the event priority
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event.  By default, there is no GlobalEvent, so return
    //! NULL.  (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method either inserts a <i>synchronous</i>
 * or <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (default). This should only be
 * done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the previous queue migration strategy, this strategy is fully
 * deterministic. This causes the event to be inserted in a separate
 * queue of asynchronous events (async_queue), which is merged with
 * the main event queue at the end of each simulation quantum (by
 * calling the handleAsyncInsertions() method). Note that this implies
 * that such events must happen at least one simulation quantum into
 * the future, otherwise they risk being scheduled in the past by
 * handleAsyncInsertions().
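 *
 * As a hedged illustration of the asynchronous path (otherQueue and
 * ev are placeholder names, not part of this interface):
 *
 *     // Deterministic cross-thread scheduling via the async queue;
 *     // the event is merged into otherQueue at the next quantum boundary.
 *     otherQueue->schedule(ev, curTick() + simQuantum, true);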
 */
class EventQueue
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove an event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to the main event queue later. Threads, other than the
    //! owning thread, should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     *
     * ScopedMigration does nothing if both event queues are the same.
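     *
     * A minimal usage sketch (otherQueue, ev and when are illustrative
     * placeholders):
     *
     *     {
     *         EventQueue::ScopedMigration migrate(otherQueue);
     *         // curEventQueue() now points to otherQueue and its lock
     *         // is held, so we can schedule on it directly.
     *         otherQueue->schedule(ev, when);
     *     }   // destructor re-locks and restores the original queue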
     */
    class ScopedMigration
    {
      public:
        ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true)
            : new_eq(*_new_eq), old_eq(*curEventQueue()),
              doMigrate((&new_eq != &old_eq) && _doMigrate)
        {
            if (doMigrate) {
                old_eq.unlock();
                new_eq.lock();
                curEventQueue(&new_eq);
            }
        }

        ~ScopedMigration()
        {
            if (doMigrate) {
                new_eq.unlock();
                old_eq.lock();
                curEventQueue(&old_eq);
            }
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
        bool doMigrate;
    };

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue waiting on the barrier.
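     *
     * A minimal usage sketch (barrier is an illustrative placeholder
     * for any potentially blocking operation):
     *
     *     {
     *         EventQueue::ScopedRelease release(curEventQueue());
     *         barrier.wait();  // safe: our service lock is not held
     *     }   // destructor re-acquires the lock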
     */
    class ScopedRelease
    {
      public:
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };

    EventQueue(const std::string &n);

    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }

    //! Schedule the given event on this queue. Safe to call from any
    //! thread.
    void schedule(Event *event, Tick when, bool global = false);

    //! Deschedule the specified event. Should be called only from the
    //! owning thread.
    void deschedule(Event *event);

    //! Reschedule the specified event. Should be called only from
    //! the owning thread.
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    // process all events up to the given timestamp.  we inline a
    // quick test to see if there are any events to process; if so,
    // call the internal out-of-line version to process them all.
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher.  I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    // return true if no events are queued
    bool empty() const { return head == NULL; }

    void dump() const;

    bool debugVerify() const;

    //! Function for moving events from the async_queue to the main queue.
    void handleAsyncInsertions();

    /**
     *  Function to signal that the event loop should be woken up because
     *  an event has been scheduled by an agent outside the gem5 event
     *  loop(s) whose event insertion may not have been noticed by gem5.
     *  This function isn't needed by the usual gem5 event loop but may
     *  be necessary in derived EventQueues which host gem5 onto other
     *  schedulers.
     *
     *  @param when Time of a delayed wakeup (if known). This parameter
     *  can be used by an implementation to schedule a wakeup in the
     *  future if it is sure it will remain active until then.
     *  Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     *  Function for replacing the head of the event queue, so that a
     *  different set of events can run without disturbing events that have
     *  already been scheduled. Already scheduled events can be processed
     *  by replacing the original head back.
     *  USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     *  NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue()
    {
        while (!empty())
            deschedule(getHead());
    }
};

void dumpMainQueue();

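/**
 * Mix-in class that forwards event scheduling calls to an associated
 * EventQueue, so that objects holding an event queue pointer can call
 * schedule()/deschedule()/reschedule() directly.
 *
 * A brief, hypothetical usage sketch (tickEvent and delay are
 * illustrative names only):
 *
 *     // inside a member function of an EventManager-derived object:
 *     schedule(tickEvent, curTick() + delay);
 */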
class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}

    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};

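/**
 * Helper template that wraps a member function of an object as an
 * event; process() simply invokes that member function.
 *
 * A brief illustrative sketch (Foo and its tick() method are
 * hypothetical):
 *
 *     Foo foo;
 *     EventWrapper<Foo, &Foo::tick> tickEvent(&foo);
 *     eventq->schedule(&tickEvent, curTick() + 1000);
 */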
template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};

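/**
 * Event variant that invokes an arbitrary std::function (e.g., a
 * lambda) when it is processed.
 *
 * A brief illustrative sketch (doWork() and owner are hypothetical
 * names):
 *
 *     EventFunctionWrapper ev([]{ doWork(); }, owner.name());
 *     eventq->schedule(&ev, curTick() + delay);
 */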
class EventFunctionWrapper : public Event
{
  private:
    std::function<void(void)> callback;
    std::string _name;

  public:
    EventFunctionWrapper(const std::function<void(void)> &callback,
                         const std::string &name,
                         bool del = false,
                         Priority p = Default_Pri)
        : Event(p), callback(callback), _name(name)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { callback(); }

    const std::string
    name() const
    {
        return _name + ".wrapped_function_event";
    }

    const char *description() const { return "EventFunctionWrapped"; }
};

#endif // __SIM_EVENTQ_HH__