// eventq.hh (annotated dump, revision 13440)
12SN/A/* 21762SN/A * Copyright (c) 2000-2005 The Regents of The University of Michigan 39983Sstever@gmail.com * Copyright (c) 2013 Advanced Micro Devices, Inc. 49983Sstever@gmail.com * Copyright (c) 2013 Mark D. Hill and David A. Wood 52SN/A * All rights reserved. 62SN/A * 72SN/A * Redistribution and use in source and binary forms, with or without 82SN/A * modification, are permitted provided that the following conditions are 92SN/A * met: redistributions of source code must retain the above copyright 102SN/A * notice, this list of conditions and the following disclaimer; 112SN/A * redistributions in binary form must reproduce the above copyright 122SN/A * notice, this list of conditions and the following disclaimer in the 132SN/A * documentation and/or other materials provided with the distribution; 142SN/A * neither the name of the copyright holders nor the names of its 152SN/A * contributors may be used to endorse or promote products derived from 162SN/A * this software without specific prior written permission. 172SN/A * 182SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 192SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 202SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 212SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 222SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 232SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 242SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 252SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 262SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 272SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 282SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
292665Ssaidi@eecs.umich.edu * 302665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 312665Ssaidi@eecs.umich.edu * Nathan Binkert 322SN/A */ 332SN/A 342SN/A/* @file 352SN/A * EventQueue interfaces 362SN/A */ 372SN/A 381354SN/A#ifndef __SIM_EVENTQ_HH__ 391354SN/A#define __SIM_EVENTQ_HH__ 402SN/A 412SN/A#include <algorithm> 425501Snate@binkert.org#include <cassert> 435546Snate@binkert.org#include <climits> 4412392Sjason@lowepower.com#include <functional> 457004Snate@binkert.org#include <iosfwd> 4610412Sandreas.hansson@arm.com#include <memory> 479983Sstever@gmail.com#include <mutex> 482SN/A#include <string> 492SN/A 505769Snate@binkert.org#include "base/flags.hh" 516216Snate@binkert.org#include "base/types.hh" 528232Snate@binkert.org#include "debug/Event.hh" 5356SN/A#include "sim/serialize.hh" 542SN/A 555543Ssaidi@eecs.umich.educlass EventQueue; // forward declaration 569983Sstever@gmail.comclass BaseGlobalEvent; 572SN/A 589983Sstever@gmail.com//! Simulation Quantum for multiple eventq simulation. 599983Sstever@gmail.com//! The quantum value is the period length after which the queues 609983Sstever@gmail.com//! synchronize themselves with each other. This means that any 619983Sstever@gmail.com//! event to scheduled on Queue A which is generated by an event on 629983Sstever@gmail.com//! Queue B should be at least simQuantum ticks away in future. 639983Sstever@gmail.comextern Tick simQuantum; 641354SN/A 659983Sstever@gmail.com//! Current number of allocated main event queues. 669983Sstever@gmail.comextern uint32_t numMainEventQueues; 679983Sstever@gmail.com 689983Sstever@gmail.com//! Array for main event queues. 699983Sstever@gmail.comextern std::vector<EventQueue *> mainEventQueue; 709983Sstever@gmail.com 719983Sstever@gmail.com//! The current event queue for the running thread. Access to this queue 729983Sstever@gmail.com//! does not require any locking from the thread. 
739983Sstever@gmail.com 749983Sstever@gmail.comextern __thread EventQueue *_curEventQueue; 759983Sstever@gmail.com 769983Sstever@gmail.com//! Current mode of execution: parallel / serial 779983Sstever@gmail.comextern bool inParallelMode; 789983Sstever@gmail.com 799983Sstever@gmail.com//! Function for returning eventq queue for the provided 809983Sstever@gmail.com//! index. The function allocates a new queue in case one 819983Sstever@gmail.com//! does not exist for the index, provided that the index 829983Sstever@gmail.com//! is with in bounds. 839983Sstever@gmail.comEventQueue *getEventQueue(uint32_t index); 849983Sstever@gmail.com 859983Sstever@gmail.cominline EventQueue *curEventQueue() { return _curEventQueue; } 869983Sstever@gmail.cominline void curEventQueue(EventQueue *q) { _curEventQueue = q; } 879983Sstever@gmail.com 889983Sstever@gmail.com/** 899983Sstever@gmail.com * Common base class for Event and GlobalEvent, so they can share flag 909983Sstever@gmail.com * and priority definitions and accessor functions. This class should 919983Sstever@gmail.com * not be used directly. 
922SN/A */ 939983Sstever@gmail.comclass EventBase 942SN/A{ 9511320Ssteve.reinhardt@amd.com protected: 968902Sandreas.hansson@arm.com typedef unsigned short FlagsType; 975769Snate@binkert.org typedef ::Flags<FlagsType> Flags; 985769Snate@binkert.org 997059Snate@binkert.org static const FlagsType PublicRead = 0x003f; // public readable flags 1007059Snate@binkert.org static const FlagsType PublicWrite = 0x001d; // public writable flags 1017059Snate@binkert.org static const FlagsType Squashed = 0x0001; // has been squashed 1027059Snate@binkert.org static const FlagsType Scheduled = 0x0002; // has been scheduled 10312040Sandreas.sandberg@arm.com static const FlagsType Managed = 0x0004; // Use life cycle manager 10412040Sandreas.sandberg@arm.com static const FlagsType AutoDelete = Managed; // delete after dispatch 10511072Sandreas.sandberg@arm.com /** 10611072Sandreas.sandberg@arm.com * This used to be AutoSerialize. This value can't be reused 10711072Sandreas.sandberg@arm.com * without changing the checkpoint version since the flag field 10811072Sandreas.sandberg@arm.com * gets serialized. 10911072Sandreas.sandberg@arm.com */ 11011072Sandreas.sandberg@arm.com static const FlagsType Reserved0 = 0x0008; 1117059Snate@binkert.org static const FlagsType IsExitEvent = 0x0010; // special exit event 1127059Snate@binkert.org static const FlagsType IsMainQueue = 0x0020; // on main event queue 1137059Snate@binkert.org static const FlagsType Initialized = 0x7a40; // somewhat random bits 1147059Snate@binkert.org static const FlagsType InitMask = 0xffc0; // mask for init bits 1157059Snate@binkert.org 1167058Snate@binkert.org public: 1177058Snate@binkert.org typedef int8_t Priority; 1187058Snate@binkert.org 119396SN/A /// Event priorities, to provide tie-breakers for events scheduled 120396SN/A /// at the same cycle. Most events are scheduled at the default 121396SN/A /// priority; these values are used to control events that need to 122396SN/A /// be ordered within a cycle. 
1235501Snate@binkert.org 1247058Snate@binkert.org /// Minimum priority 1257058Snate@binkert.org static const Priority Minimum_Pri = SCHAR_MIN; 1263329Sstever@eecs.umich.edu 1277058Snate@binkert.org /// If we enable tracing on a particular cycle, do that as the 1287058Snate@binkert.org /// very first thing so we don't miss any of the events on 1297058Snate@binkert.org /// that cycle (even if we enter the debugger). 1309979Satgutier@umich.edu static const Priority Debug_Enable_Pri = -101; 131396SN/A 1327058Snate@binkert.org /// Breakpoints should happen before anything else (except 1337058Snate@binkert.org /// enabling trace output), so we don't miss any action when 1347058Snate@binkert.org /// debugging. 1357058Snate@binkert.org static const Priority Debug_Break_Pri = -100; 1363329Sstever@eecs.umich.edu 1377058Snate@binkert.org /// CPU switches schedule the new CPU's tick event for the 1387058Snate@binkert.org /// same cycle (after unscheduling the old CPU's tick event). 1397058Snate@binkert.org /// The switch needs to come before any tick events to make 1407058Snate@binkert.org /// sure we don't tick both CPUs in the same cycle. 1417058Snate@binkert.org static const Priority CPU_Switch_Pri = -31; 142396SN/A 1437058Snate@binkert.org /// For some reason "delayed" inter-cluster writebacks are 1447058Snate@binkert.org /// scheduled before regular writebacks (which have default 1457058Snate@binkert.org /// priority). Steve? 1467058Snate@binkert.org static const Priority Delayed_Writeback_Pri = -1; 147396SN/A 1487058Snate@binkert.org /// Default is zero for historical reasons. 
1497058Snate@binkert.org static const Priority Default_Pri = 0; 150396SN/A 15110249Sstephan.diestelhorst@arm.com /// DVFS update event leads to stats dump therefore given a lower priority 15210249Sstephan.diestelhorst@arm.com /// to ensure all relevant states have been updated 15310249Sstephan.diestelhorst@arm.com static const Priority DVFS_Update_Pri = 31; 15410249Sstephan.diestelhorst@arm.com 1557058Snate@binkert.org /// Serailization needs to occur before tick events also, so 1567058Snate@binkert.org /// that a serialize/unserialize is identical to an on-line 1577058Snate@binkert.org /// CPU switch. 1587058Snate@binkert.org static const Priority Serialize_Pri = 32; 159396SN/A 1607058Snate@binkert.org /// CPU ticks must come after other associated CPU events 1617058Snate@binkert.org /// (such as writebacks). 1627058Snate@binkert.org static const Priority CPU_Tick_Pri = 50; 163396SN/A 1647058Snate@binkert.org /// Statistics events (dump, reset, etc.) come after 1657058Snate@binkert.org /// everything else, but before exit. 1667058Snate@binkert.org static const Priority Stat_Event_Pri = 90; 1674075Sbinkertn@umich.edu 1687058Snate@binkert.org /// Progress events come at the end. 1697058Snate@binkert.org static const Priority Progress_Event_Pri = 95; 1705501Snate@binkert.org 1717058Snate@binkert.org /// If we want to exit on this cycle, it's the very last thing 1727058Snate@binkert.org /// we do. 1737058Snate@binkert.org static const Priority Sim_Exit_Pri = 100; 1747058Snate@binkert.org 1757058Snate@binkert.org /// Maximum priority 1767058Snate@binkert.org static const Priority Maximum_Pri = SCHAR_MAX; 1779983Sstever@gmail.com}; 1789983Sstever@gmail.com 1799983Sstever@gmail.com/* 1809983Sstever@gmail.com * An item on an event queue. The action caused by a given 1819983Sstever@gmail.com * event is specified by deriving a subclass and overriding the 1829983Sstever@gmail.com * process() member function. 
1839983Sstever@gmail.com * 1849983Sstever@gmail.com * Caution, the order of members is chosen to maximize data packing. 1859983Sstever@gmail.com */ 1869983Sstever@gmail.comclass Event : public EventBase, public Serializable 1879983Sstever@gmail.com{ 1889983Sstever@gmail.com friend class EventQueue; 1899983Sstever@gmail.com 1909983Sstever@gmail.com private: 1919983Sstever@gmail.com // The event queue is now a linked list of linked lists. The 1929983Sstever@gmail.com // 'nextBin' pointer is to find the bin, where a bin is defined as 1939983Sstever@gmail.com // when+priority. All events in the same bin will be stored in a 1949983Sstever@gmail.com // second linked list (a stack) maintained by the 'nextInBin' 1959983Sstever@gmail.com // pointer. The list will be accessed in LIFO order. The end 1969983Sstever@gmail.com // result is that the insert/removal in 'nextBin' is 1979983Sstever@gmail.com // linear/constant, and the lookup/removal in 'nextInBin' is 1989983Sstever@gmail.com // constant/constant. Hopefully this is a significant improvement 1999983Sstever@gmail.com // over the current fully linear insertion. 2009983Sstever@gmail.com Event *nextBin; 2019983Sstever@gmail.com Event *nextInBin; 2029983Sstever@gmail.com 2039983Sstever@gmail.com static Event *insertBefore(Event *event, Event *curr); 2049983Sstever@gmail.com static Event *removeItem(Event *event, Event *last); 2059983Sstever@gmail.com 2069983Sstever@gmail.com Tick _when; //!< timestamp when event should be processed 2079983Sstever@gmail.com Priority _priority; //!< event priority 2089983Sstever@gmail.com Flags flags; 2099983Sstever@gmail.com 2109983Sstever@gmail.com#ifndef NDEBUG 2119983Sstever@gmail.com /// Global counter to generate unique IDs for Event instances 2129983Sstever@gmail.com static Counter instanceCounter; 2139983Sstever@gmail.com 2149983Sstever@gmail.com /// This event's unique ID. 
We can also use pointer values for 2159983Sstever@gmail.com /// this but they're not consistent across runs making debugging 2169983Sstever@gmail.com /// more difficult. Thus we use a global counter value when 2179983Sstever@gmail.com /// debugging. 2189983Sstever@gmail.com Counter instance; 2199983Sstever@gmail.com 2209983Sstever@gmail.com /// queue to which this event belongs (though it may or may not be 2219983Sstever@gmail.com /// scheduled on this queue yet) 2229983Sstever@gmail.com EventQueue *queue; 2239983Sstever@gmail.com#endif 2249983Sstever@gmail.com 2259983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2269983Sstever@gmail.com Tick whenCreated; //!< time created 2279983Sstever@gmail.com Tick whenScheduled; //!< time scheduled 2289983Sstever@gmail.com#endif 2299983Sstever@gmail.com 2309983Sstever@gmail.com void 2319983Sstever@gmail.com setWhen(Tick when, EventQueue *q) 2329983Sstever@gmail.com { 2339983Sstever@gmail.com _when = when; 2349983Sstever@gmail.com#ifndef NDEBUG 2359983Sstever@gmail.com queue = q; 2369983Sstever@gmail.com#endif 2379983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2389983Sstever@gmail.com whenScheduled = curTick(); 2399983Sstever@gmail.com#endif 2409983Sstever@gmail.com } 2419983Sstever@gmail.com 2429983Sstever@gmail.com bool 2439983Sstever@gmail.com initialized() const 2449983Sstever@gmail.com { 24510673SAndreas.Sandberg@ARM.com return (flags & InitMask) == Initialized; 2469983Sstever@gmail.com } 2479983Sstever@gmail.com 2489983Sstever@gmail.com protected: 2499983Sstever@gmail.com /// Accessor for flags. 
2509983Sstever@gmail.com Flags 2519983Sstever@gmail.com getFlags() const 2529983Sstever@gmail.com { 2539983Sstever@gmail.com return flags & PublicRead; 2549983Sstever@gmail.com } 2559983Sstever@gmail.com 2569983Sstever@gmail.com bool 2579983Sstever@gmail.com isFlagSet(Flags _flags) const 2589983Sstever@gmail.com { 2599983Sstever@gmail.com assert(_flags.noneSet(~PublicRead)); 2609983Sstever@gmail.com return flags.isSet(_flags); 2619983Sstever@gmail.com } 2629983Sstever@gmail.com 2639983Sstever@gmail.com /// Accessor for flags. 2649983Sstever@gmail.com void 2659983Sstever@gmail.com setFlags(Flags _flags) 2669983Sstever@gmail.com { 2679983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2689983Sstever@gmail.com flags.set(_flags); 2699983Sstever@gmail.com } 2709983Sstever@gmail.com 2719983Sstever@gmail.com void 2729983Sstever@gmail.com clearFlags(Flags _flags) 2739983Sstever@gmail.com { 2749983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2759983Sstever@gmail.com flags.clear(_flags); 2769983Sstever@gmail.com } 2779983Sstever@gmail.com 2789983Sstever@gmail.com void 2799983Sstever@gmail.com clearFlags() 2809983Sstever@gmail.com { 2819983Sstever@gmail.com flags.clear(PublicWrite); 2829983Sstever@gmail.com } 2839983Sstever@gmail.com 2849983Sstever@gmail.com // This function isn't really useful if TRACING_ON is not defined 2859983Sstever@gmail.com virtual void trace(const char *action); //!< trace event activity 2869983Sstever@gmail.com 28712040Sandreas.sandberg@arm.com protected: /* Memory management */ 28812040Sandreas.sandberg@arm.com /** 28912040Sandreas.sandberg@arm.com * @{ 29012040Sandreas.sandberg@arm.com * Memory management hooks for events that have the Managed flag set 29112040Sandreas.sandberg@arm.com * 29212040Sandreas.sandberg@arm.com * Events can use automatic memory management by setting the 29312040Sandreas.sandberg@arm.com * Managed flag. 
The default implementation automatically deletes 29412040Sandreas.sandberg@arm.com * events once they have been removed from the event queue. This 29512040Sandreas.sandberg@arm.com * typically happens when events are descheduled or have been 29612040Sandreas.sandberg@arm.com * triggered and not rescheduled. 29712040Sandreas.sandberg@arm.com * 29812040Sandreas.sandberg@arm.com * The methods below may be overridden by events that need custom 29912040Sandreas.sandberg@arm.com * memory management. For example, events exported to Python need 30012040Sandreas.sandberg@arm.com * to impement reference counting to ensure that the Python 30112040Sandreas.sandberg@arm.com * implementation of the event is kept alive while it lives in the 30212040Sandreas.sandberg@arm.com * event queue. 30312040Sandreas.sandberg@arm.com * 30412040Sandreas.sandberg@arm.com * @note Memory managers are responsible for implementing 30512040Sandreas.sandberg@arm.com * reference counting (by overriding both acquireImpl() and 30612040Sandreas.sandberg@arm.com * releaseImpl()) or checking if an event is no longer scheduled 30712040Sandreas.sandberg@arm.com * in releaseImpl() before deallocating it. 30812040Sandreas.sandberg@arm.com */ 30912040Sandreas.sandberg@arm.com 31012040Sandreas.sandberg@arm.com /** 31112040Sandreas.sandberg@arm.com * Managed event scheduled and being held in the event queue. 31212040Sandreas.sandberg@arm.com */ 31312040Sandreas.sandberg@arm.com void acquire() 31412040Sandreas.sandberg@arm.com { 31512040Sandreas.sandberg@arm.com if (flags.isSet(Event::Managed)) 31612040Sandreas.sandberg@arm.com acquireImpl(); 31712040Sandreas.sandberg@arm.com } 31812040Sandreas.sandberg@arm.com 31912040Sandreas.sandberg@arm.com /** 32012040Sandreas.sandberg@arm.com * Managed event removed from the event queue. 
32112040Sandreas.sandberg@arm.com */ 32212040Sandreas.sandberg@arm.com void release() { 32312040Sandreas.sandberg@arm.com if (flags.isSet(Event::Managed)) 32412040Sandreas.sandberg@arm.com releaseImpl(); 32512040Sandreas.sandberg@arm.com } 32612040Sandreas.sandberg@arm.com 32712040Sandreas.sandberg@arm.com virtual void acquireImpl() {} 32812040Sandreas.sandberg@arm.com 32912040Sandreas.sandberg@arm.com virtual void releaseImpl() { 33012040Sandreas.sandberg@arm.com if (!scheduled()) 33112040Sandreas.sandberg@arm.com delete this; 33212040Sandreas.sandberg@arm.com } 33312040Sandreas.sandberg@arm.com 33412040Sandreas.sandberg@arm.com /** @} */ 33512040Sandreas.sandberg@arm.com 3369983Sstever@gmail.com public: 337396SN/A 3382SN/A /* 3392SN/A * Event constructor 3402SN/A * @param queue that the event gets scheduled on 3412SN/A */ 3428581Ssteve.reinhardt@amd.com Event(Priority p = Default_Pri, Flags f = 0) 34310360Sandreas.hansson@arm.com : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p), 3448581Ssteve.reinhardt@amd.com flags(Initialized | f) 345224SN/A { 3468581Ssteve.reinhardt@amd.com assert(f.noneSet(~PublicWrite)); 3474016Sstever@eecs.umich.edu#ifndef NDEBUG 3485501Snate@binkert.org instance = ++instanceCounter; 3495605Snate@binkert.org queue = NULL; 3505501Snate@binkert.org#endif 3515501Snate@binkert.org#ifdef EVENTQ_DEBUG 3527823Ssteve.reinhardt@amd.com whenCreated = curTick(); 3535501Snate@binkert.org whenScheduled = 0; 3544016Sstever@eecs.umich.edu#endif 355224SN/A } 356224SN/A 3575768Snate@binkert.org virtual ~Event(); 3585768Snate@binkert.org virtual const std::string name() const; 359265SN/A 3605501Snate@binkert.org /// Return a C string describing the event. This string should 3615501Snate@binkert.org /// *not* be dynamically allocated; just a const char array 3625501Snate@binkert.org /// describing the event class. 
3635501Snate@binkert.org virtual const char *description() const; 3645501Snate@binkert.org 3655501Snate@binkert.org /// Dump the current event data 3665501Snate@binkert.org void dump() const; 3675501Snate@binkert.org 3685501Snate@binkert.org public: 3695501Snate@binkert.org /* 3705501Snate@binkert.org * This member function is invoked when the event is processed 3715501Snate@binkert.org * (occurs). There is no default implementation; each subclass 3725501Snate@binkert.org * must provide its own implementation. The event is not 3735501Snate@binkert.org * automatically deleted after it is processed (to allow for 3745501Snate@binkert.org * statically allocated event objects). 3755501Snate@binkert.org * 3765501Snate@binkert.org * If the AutoDestroy flag is set, the object is deleted once it 3775501Snate@binkert.org * is processed. 3785501Snate@binkert.org */ 3795501Snate@binkert.org virtual void process() = 0; 3805501Snate@binkert.org 3812SN/A /// Determine if the current event is scheduled 3825769Snate@binkert.org bool scheduled() const { return flags.isSet(Scheduled); } 3832SN/A 3842SN/A /// Squash the current event 3855769Snate@binkert.org void squash() { flags.set(Squashed); } 3862SN/A 3872SN/A /// Check whether the event is squashed 3885769Snate@binkert.org bool squashed() const { return flags.isSet(Squashed); } 3892SN/A 3902667Sstever@eecs.umich.edu /// See if this is a SimExitEvent (without resorting to RTTI) 3915769Snate@binkert.org bool isExitEvent() const { return flags.isSet(IsExitEvent); } 3922667Sstever@eecs.umich.edu 39310992Stimothy.jones@cl.cam.ac.uk /// Check whether this event will auto-delete 39412040Sandreas.sandberg@arm.com bool isManaged() const { return flags.isSet(Managed); } 39512040Sandreas.sandberg@arm.com bool isAutoDelete() const { return isManaged(); } 39610992Stimothy.jones@cl.cam.ac.uk 3972SN/A /// Get the time that the event is scheduled 3982SN/A Tick when() const { return _when; } 3992SN/A 4002SN/A /// Get the event priority 
4017058Snate@binkert.org Priority priority() const { return _priority; } 4022SN/A 4039983Sstever@gmail.com //! If this is part of a GlobalEvent, return the pointer to the 4049983Sstever@gmail.com //! Global Event. By default, there is no GlobalEvent, so return 4059983Sstever@gmail.com //! NULL. (Overridden in GlobalEvent::BarrierEvent.) 4069983Sstever@gmail.com virtual BaseGlobalEvent *globalEvent() { return NULL; } 4079983Sstever@gmail.com 40811168Sandreas.hansson@arm.com void serialize(CheckpointOut &cp) const override; 40911168Sandreas.hansson@arm.com void unserialize(CheckpointIn &cp) override; 410571SN/A}; 411571SN/A 4127005Snate@binkert.orginline bool 4137005Snate@binkert.orgoperator<(const Event &l, const Event &r) 4147005Snate@binkert.org{ 4157005Snate@binkert.org return l.when() < r.when() || 4167005Snate@binkert.org (l.when() == r.when() && l.priority() < r.priority()); 4177005Snate@binkert.org} 4187005Snate@binkert.org 4197005Snate@binkert.orginline bool 4207005Snate@binkert.orgoperator>(const Event &l, const Event &r) 4217005Snate@binkert.org{ 4227005Snate@binkert.org return l.when() > r.when() || 4237005Snate@binkert.org (l.when() == r.when() && l.priority() > r.priority()); 4247005Snate@binkert.org} 4257005Snate@binkert.org 4267005Snate@binkert.orginline bool 4277005Snate@binkert.orgoperator<=(const Event &l, const Event &r) 4287005Snate@binkert.org{ 4297005Snate@binkert.org return l.when() < r.when() || 4307005Snate@binkert.org (l.when() == r.when() && l.priority() <= r.priority()); 4317005Snate@binkert.org} 4327005Snate@binkert.orginline bool 4337005Snate@binkert.orgoperator>=(const Event &l, const Event &r) 4347005Snate@binkert.org{ 4357005Snate@binkert.org return l.when() > r.when() || 4367005Snate@binkert.org (l.when() == r.when() && l.priority() >= r.priority()); 4377005Snate@binkert.org} 4387005Snate@binkert.org 4397005Snate@binkert.orginline bool 4407005Snate@binkert.orgoperator==(const Event &l, const Event &r) 4417005Snate@binkert.org{ 
4427005Snate@binkert.org return l.when() == r.when() && l.priority() == r.priority(); 4437005Snate@binkert.org} 4447005Snate@binkert.org 4457005Snate@binkert.orginline bool 4467005Snate@binkert.orgoperator!=(const Event &l, const Event &r) 4477005Snate@binkert.org{ 4487005Snate@binkert.org return l.when() != r.when() || l.priority() != r.priority(); 4497005Snate@binkert.org} 4507005Snate@binkert.org 45110153Sandreas@sandberg.pp.se/** 4522SN/A * Queue of events sorted in time order 45310153Sandreas@sandberg.pp.se * 45410153Sandreas@sandberg.pp.se * Events are scheduled (inserted into the event queue) using the 45510153Sandreas@sandberg.pp.se * schedule() method. This method either inserts a <i>synchronous</i> 45610153Sandreas@sandberg.pp.se * or <i>asynchronous</i> event. 45710153Sandreas@sandberg.pp.se * 45810153Sandreas@sandberg.pp.se * Synchronous events are scheduled using schedule() method with the 45910153Sandreas@sandberg.pp.se * argument 'global' set to false (default). This should only be done 46010153Sandreas@sandberg.pp.se * from a thread holding the event queue lock 46110153Sandreas@sandberg.pp.se * (EventQueue::service_mutex). The lock is always held when an event 46210153Sandreas@sandberg.pp.se * handler is called, it can therefore always insert events into its 46310153Sandreas@sandberg.pp.se * own event queue unless it voluntarily releases the lock. 46410153Sandreas@sandberg.pp.se * 46510153Sandreas@sandberg.pp.se * Events can be scheduled across thread (and event queue borders) by 46610153Sandreas@sandberg.pp.se * either scheduling asynchronous events or taking the target event 46710153Sandreas@sandberg.pp.se * queue's lock. However, the lock should <i>never</i> be taken 46810153Sandreas@sandberg.pp.se * directly since this is likely to cause deadlocks. Instead, code 46910153Sandreas@sandberg.pp.se * that needs to schedule events in other event queues should 47010153Sandreas@sandberg.pp.se * temporarily release its own queue and lock the new queue. 
This 47110153Sandreas@sandberg.pp.se * prevents deadlocks since a single thread never owns more than one 47210153Sandreas@sandberg.pp.se * event queue lock. This functionality is provided by the 47310153Sandreas@sandberg.pp.se * ScopedMigration helper class. Note that temporarily migrating 47410153Sandreas@sandberg.pp.se * between event queues can make the simulation non-deterministic, it 47510153Sandreas@sandberg.pp.se * should therefore be limited to cases where that can be tolerated 47610153Sandreas@sandberg.pp.se * (e.g., handling asynchronous IO or fast-forwarding in KVM). 47710153Sandreas@sandberg.pp.se * 47810153Sandreas@sandberg.pp.se * Asynchronous events can also be scheduled using the normal 47910153Sandreas@sandberg.pp.se * schedule() method with the 'global' parameter set to true. Unlike 48010153Sandreas@sandberg.pp.se * the previous queue migration strategy, this strategy is fully 48110153Sandreas@sandberg.pp.se * deterministic. This causes the event to be inserted in a separate 48210153Sandreas@sandberg.pp.se * queue of asynchronous events (async_queue), which is merged main 48310153Sandreas@sandberg.pp.se * event queue at the end of each simulation quantum (by calling the 48410153Sandreas@sandberg.pp.se * handleAsyncInsertions() method). Note that this implies that such 48510153Sandreas@sandberg.pp.se * events must happen at least one simulation quantum into the future, 48610153Sandreas@sandberg.pp.se * otherwise they risk being scheduled in the past by 48710153Sandreas@sandberg.pp.se * handleAsyncInsertions(). 4882SN/A */ 48911072Sandreas.sandberg@arm.comclass EventQueue 4902SN/A{ 4915605Snate@binkert.org private: 492265SN/A std::string objName; 4932SN/A Event *head; 4949356Snilay@cs.wisc.edu Tick _curTick; 4952SN/A 4969983Sstever@gmail.com //! Mutex to protect async queue. 49710412Sandreas.hansson@arm.com std::mutex async_queue_mutex; 4989983Sstever@gmail.com 4999983Sstever@gmail.com //! List of events added by other threads to this event queue. 
5009983Sstever@gmail.com std::list<Event*> async_queue; 5019983Sstever@gmail.com 50210153Sandreas@sandberg.pp.se /** 50310153Sandreas@sandberg.pp.se * Lock protecting event handling. 50410153Sandreas@sandberg.pp.se * 50510153Sandreas@sandberg.pp.se * This lock is always taken when servicing events. It is assumed 50610153Sandreas@sandberg.pp.se * that the thread scheduling new events (not asynchronous events 50710153Sandreas@sandberg.pp.se * though) have taken this lock. This is normally done by 50810153Sandreas@sandberg.pp.se * serviceOne() since new events are typically scheduled as a 50910153Sandreas@sandberg.pp.se * response to an earlier event. 51010153Sandreas@sandberg.pp.se * 51110153Sandreas@sandberg.pp.se * This lock is intended to be used to temporarily steal an event 51210153Sandreas@sandberg.pp.se * queue to support inter-thread communication when some 51310153Sandreas@sandberg.pp.se * deterministic timing can be sacrificed for speed. For example, 51410153Sandreas@sandberg.pp.se * the KVM CPU can use this support to access devices running in a 51510153Sandreas@sandberg.pp.se * different thread. 51610153Sandreas@sandberg.pp.se * 51710153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 51810153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 51910153Sandreas@sandberg.pp.se * @see EventQueue::lock() 52010153Sandreas@sandberg.pp.se * @see EventQueue::unlock() 52110153Sandreas@sandberg.pp.se */ 52210153Sandreas@sandberg.pp.se std::mutex service_mutex; 52310153Sandreas@sandberg.pp.se 5249983Sstever@gmail.com //! Insert / remove event from the queue. Should only be called 5259983Sstever@gmail.com //! by thread operating this queue. 5262SN/A void insert(Event *event); 5272SN/A void remove(Event *event); 5282SN/A 5299983Sstever@gmail.com //! Function for adding events to the async queue. The added events 5309983Sstever@gmail.com //! are added to main event queue later. Threads, other than the 5319983Sstever@gmail.com //! 
owning thread, should call this function instead of insert(). 5329983Sstever@gmail.com void asyncInsert(Event *event); 5339983Sstever@gmail.com 5347063Snate@binkert.org EventQueue(const EventQueue &); 5357063Snate@binkert.org 5362SN/A public: 53710153Sandreas@sandberg.pp.se /** 53810153Sandreas@sandberg.pp.se * Temporarily migrate execution to a different event queue. 53910153Sandreas@sandberg.pp.se * 54010153Sandreas@sandberg.pp.se * An instance of this class temporarily migrates execution to a 54110153Sandreas@sandberg.pp.se * different event queue by releasing the current queue, locking 54210153Sandreas@sandberg.pp.se * the new queue, and updating curEventQueue(). This can, for 54310153Sandreas@sandberg.pp.se * example, be useful when performing IO across thread event 54410153Sandreas@sandberg.pp.se * queues when timing is not crucial (e.g., during fast 54510153Sandreas@sandberg.pp.se * forwarding). 54612270Stiago.muck@arm.com * 54712270Stiago.muck@arm.com * ScopedMigration does nothing if both eqs are the same 54810153Sandreas@sandberg.pp.se */ 54910153Sandreas@sandberg.pp.se class ScopedMigration 55010153Sandreas@sandberg.pp.se { 55110153Sandreas@sandberg.pp.se public: 55212270Stiago.muck@arm.com ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true) 55312270Stiago.muck@arm.com :new_eq(*_new_eq), old_eq(*curEventQueue()), 55412270Stiago.muck@arm.com doMigrate((&new_eq != &old_eq)&&_doMigrate) 55510153Sandreas@sandberg.pp.se { 55612270Stiago.muck@arm.com if (doMigrate){ 55712270Stiago.muck@arm.com old_eq.unlock(); 55812270Stiago.muck@arm.com new_eq.lock(); 55912270Stiago.muck@arm.com curEventQueue(&new_eq); 56012270Stiago.muck@arm.com } 56110153Sandreas@sandberg.pp.se } 56210153Sandreas@sandberg.pp.se 56310153Sandreas@sandberg.pp.se ~ScopedMigration() 56410153Sandreas@sandberg.pp.se { 56512270Stiago.muck@arm.com if (doMigrate){ 56612270Stiago.muck@arm.com new_eq.unlock(); 56712270Stiago.muck@arm.com old_eq.lock(); 56812270Stiago.muck@arm.com 
curEventQueue(&old_eq); 56912270Stiago.muck@arm.com } 57010153Sandreas@sandberg.pp.se } 57110153Sandreas@sandberg.pp.se 57210153Sandreas@sandberg.pp.se private: 57310153Sandreas@sandberg.pp.se EventQueue &new_eq; 57410153Sandreas@sandberg.pp.se EventQueue &old_eq; 57512270Stiago.muck@arm.com bool doMigrate; 57610153Sandreas@sandberg.pp.se }; 57710153Sandreas@sandberg.pp.se 57810153Sandreas@sandberg.pp.se /** 57910153Sandreas@sandberg.pp.se * Temporarily release the event queue service lock. 58010153Sandreas@sandberg.pp.se * 58110153Sandreas@sandberg.pp.se * There are cases where it is desirable to temporarily release 58210153Sandreas@sandberg.pp.se * the event queue lock to prevent deadlocks. For example, when 58310153Sandreas@sandberg.pp.se * waiting on the global barrier, we need to release the lock to 58410153Sandreas@sandberg.pp.se * prevent deadlocks from happening when another thread tries to 58510153Sandreas@sandberg.pp.se * temporarily take over the event queue waiting on the barrier. 
58610153Sandreas@sandberg.pp.se */ 58710153Sandreas@sandberg.pp.se class ScopedRelease 58810153Sandreas@sandberg.pp.se { 58910153Sandreas@sandberg.pp.se public: 59010153Sandreas@sandberg.pp.se ScopedRelease(EventQueue *_eq) 59110153Sandreas@sandberg.pp.se : eq(*_eq) 59210153Sandreas@sandberg.pp.se { 59310153Sandreas@sandberg.pp.se eq.unlock(); 59410153Sandreas@sandberg.pp.se } 59510153Sandreas@sandberg.pp.se 59610153Sandreas@sandberg.pp.se ~ScopedRelease() 59710153Sandreas@sandberg.pp.se { 59810153Sandreas@sandberg.pp.se eq.lock(); 59910153Sandreas@sandberg.pp.se } 60010153Sandreas@sandberg.pp.se 60110153Sandreas@sandberg.pp.se private: 60210153Sandreas@sandberg.pp.se EventQueue &eq; 60310153Sandreas@sandberg.pp.se }; 60410153Sandreas@sandberg.pp.se 6057063Snate@binkert.org EventQueue(const std::string &n); 6062SN/A 607512SN/A virtual const std::string name() const { return objName; } 6089983Sstever@gmail.com void name(const std::string &st) { objName = st; } 609265SN/A 6109983Sstever@gmail.com //! Schedule the given event on this queue. Safe to call from any 6119983Sstever@gmail.com //! thread. 6129983Sstever@gmail.com void schedule(Event *event, Tick when, bool global = false); 6139983Sstever@gmail.com 6149983Sstever@gmail.com //! Deschedule the specified event. Should be called only from the 6159983Sstever@gmail.com //! owning thread. 6165738Snate@binkert.org void deschedule(Event *event); 6179983Sstever@gmail.com 6189983Sstever@gmail.com //! Reschedule the specified event. Should be called only from 6199983Sstever@gmail.com //! the owning thread. 
6205738Snate@binkert.org void reschedule(Event *event, Tick when, bool always = false); 6212SN/A 6225501Snate@binkert.org Tick nextTick() const { return head->when(); } 6239356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { _curTick = newVal; } 62411015Sandreas.sandberg@arm.com Tick getCurTick() const { return _curTick; } 62510991Stimothy.jones@cl.cam.ac.uk Event *getHead() const { return head; } 6269356Snilay@cs.wisc.edu 6272667Sstever@eecs.umich.edu Event *serviceOne(); 6282SN/A 6292SN/A // process all events up to the given timestamp. we inline a 6302SN/A // quick test to see if there are any events to process; if so, 6312SN/A // call the internal out-of-line version to process them all. 6325501Snate@binkert.org void 6335501Snate@binkert.org serviceEvents(Tick when) 6345501Snate@binkert.org { 6352SN/A while (!empty()) { 6362SN/A if (nextTick() > when) 6372SN/A break; 6382SN/A 6391634SN/A /** 6401634SN/A * @todo this assert is a good bug catcher. I need to 6411634SN/A * make it true again. 6421634SN/A */ 6431634SN/A //assert(head->when() >= when && "event scheduled in the past"); 6442SN/A serviceOne(); 6452SN/A } 6469356Snilay@cs.wisc.edu 6479356Snilay@cs.wisc.edu setCurTick(when); 6482SN/A } 6492SN/A 6502SN/A // return true if no events are queued 6515501Snate@binkert.org bool empty() const { return head == NULL; } 6522SN/A 6535501Snate@binkert.org void dump() const; 6542SN/A 6555502Snate@binkert.org bool debugVerify() const; 6565502Snate@binkert.org 6579983Sstever@gmail.com //! Function for moving events from the async_queue to the main queue. 6589983Sstever@gmail.com void handleAsyncInsertions(); 6599983Sstever@gmail.com 6608648Snilay@cs.wisc.edu /** 66110476Sandreas.hansson@arm.com * Function to signal that the event loop should be woken up because 66210476Sandreas.hansson@arm.com * an event has been scheduled by an agent outside the gem5 event 66310476Sandreas.hansson@arm.com * loop(s) whose event insertion may not have been noticed by gem5. 
66410476Sandreas.hansson@arm.com * This function isn't needed by the usual gem5 event loop but may 66510476Sandreas.hansson@arm.com * be necessary in derived EventQueues which host gem5 onto other 66610476Sandreas.hansson@arm.com * schedulers. 66710476Sandreas.hansson@arm.com * 66810476Sandreas.hansson@arm.com * @param when Time of a delayed wakeup (if known). This parameter 66910476Sandreas.hansson@arm.com * can be used by an implementation to schedule a wakeup in the 67010476Sandreas.hansson@arm.com * future if it is sure it will remain active until then. 67110476Sandreas.hansson@arm.com * Or it can be ignored and the event queue can be woken up now. 67210476Sandreas.hansson@arm.com */ 67310476Sandreas.hansson@arm.com virtual void wakeup(Tick when = (Tick)-1) { } 67410476Sandreas.hansson@arm.com 67510476Sandreas.hansson@arm.com /** 6768648Snilay@cs.wisc.edu * function for replacing the head of the event queue, so that a 6778648Snilay@cs.wisc.edu * different set of events can run without disturbing events that have 6788648Snilay@cs.wisc.edu * already been scheduled. Already scheduled events can be processed 6798648Snilay@cs.wisc.edu * by replacing the original head back. 6808648Snilay@cs.wisc.edu * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR. 6818648Snilay@cs.wisc.edu * NOT RECOMMENDED FOR USE. 6828648Snilay@cs.wisc.edu */ 6838648Snilay@cs.wisc.edu Event* replaceHead(Event* s); 6848648Snilay@cs.wisc.edu 68510153Sandreas@sandberg.pp.se /**@{*/ 68610153Sandreas@sandberg.pp.se /** 68710153Sandreas@sandberg.pp.se * Provide an interface for locking/unlocking the event queue. 68810153Sandreas@sandberg.pp.se * 68910153Sandreas@sandberg.pp.se * @warn Do NOT use these methods directly unless you really know 69010153Sandreas@sandberg.pp.se * what you are doing. Incorrect use can easily lead to simulator 69110153Sandreas@sandberg.pp.se * deadlocks. 69210153Sandreas@sandberg.pp.se * 69310153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 
69410153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 69510153Sandreas@sandberg.pp.se * @see EventQueue 69610153Sandreas@sandberg.pp.se */ 69710153Sandreas@sandberg.pp.se void lock() { service_mutex.lock(); } 69810153Sandreas@sandberg.pp.se void unlock() { service_mutex.unlock(); } 69910153Sandreas@sandberg.pp.se /**@}*/ 70010153Sandreas@sandberg.pp.se 70110906Sandreas.sandberg@arm.com /** 70210906Sandreas.sandberg@arm.com * Reschedule an event after a checkpoint. 70310906Sandreas.sandberg@arm.com * 70410906Sandreas.sandberg@arm.com * Since events don't know which event queue they belong to, 70510906Sandreas.sandberg@arm.com * parent objects need to reschedule events themselves. This 70610906Sandreas.sandberg@arm.com * method conditionally schedules an event that has the Scheduled 70710906Sandreas.sandberg@arm.com * flag set. It should be called by parent objects after 70810906Sandreas.sandberg@arm.com * unserializing an object. 70910906Sandreas.sandberg@arm.com * 71010906Sandreas.sandberg@arm.com * @warn Only use this method after unserializing an Event. 
71110906Sandreas.sandberg@arm.com */ 71210906Sandreas.sandberg@arm.com void checkpointReschedule(Event *event); 71310906Sandreas.sandberg@arm.com 71413440Sgabeblack@google.com virtual ~EventQueue() 71513440Sgabeblack@google.com { 71613440Sgabeblack@google.com while (!empty()) 71713440Sgabeblack@google.com deschedule(getHead()); 71813440Sgabeblack@google.com } 7192SN/A}; 7202SN/A 7219554Sandreas.hansson@arm.comvoid dumpMainQueue(); 7229554Sandreas.hansson@arm.com 7235605Snate@binkert.orgclass EventManager 7245605Snate@binkert.org{ 7255605Snate@binkert.org protected: 7265605Snate@binkert.org /** A pointer to this object's event queue */ 7275605Snate@binkert.org EventQueue *eventq; 7282SN/A 7295605Snate@binkert.org public: 7309099Sandreas.hansson@arm.com EventManager(EventManager &em) : eventq(em.eventq) {} 7319159Sandreas.hansson@arm.com EventManager(EventManager *em) : eventq(em->eventq) {} 7325605Snate@binkert.org EventManager(EventQueue *eq) : eventq(eq) {} 7332SN/A 7345605Snate@binkert.org EventQueue * 7359099Sandreas.hansson@arm.com eventQueue() const 7367060Snate@binkert.org { 7377060Snate@binkert.org return eventq; 7387060Snate@binkert.org } 7397060Snate@binkert.org 7405605Snate@binkert.org void 7415605Snate@binkert.org schedule(Event &event, Tick when) 7425605Snate@binkert.org { 7435605Snate@binkert.org eventq->schedule(&event, when); 7445605Snate@binkert.org } 7455605Snate@binkert.org 7465605Snate@binkert.org void 7475605Snate@binkert.org deschedule(Event &event) 7485605Snate@binkert.org { 7495605Snate@binkert.org eventq->deschedule(&event); 7505605Snate@binkert.org } 7515605Snate@binkert.org 7525605Snate@binkert.org void 7535605Snate@binkert.org reschedule(Event &event, Tick when, bool always = false) 7545605Snate@binkert.org { 7555605Snate@binkert.org eventq->reschedule(&event, when, always); 7565605Snate@binkert.org } 7575605Snate@binkert.org 7585605Snate@binkert.org void 7595605Snate@binkert.org schedule(Event *event, Tick when) 7605605Snate@binkert.org 
{ 7615605Snate@binkert.org eventq->schedule(event, when); 7625605Snate@binkert.org } 7635605Snate@binkert.org 7645605Snate@binkert.org void 7655605Snate@binkert.org deschedule(Event *event) 7665605Snate@binkert.org { 7675605Snate@binkert.org eventq->deschedule(event); 7685605Snate@binkert.org } 7695605Snate@binkert.org 7705605Snate@binkert.org void 7715605Snate@binkert.org reschedule(Event *event, Tick when, bool always = false) 7725605Snate@binkert.org { 7735605Snate@binkert.org eventq->reschedule(event, when, always); 7745605Snate@binkert.org } 7759356Snilay@cs.wisc.edu 77610476Sandreas.hansson@arm.com void wakeupEventQueue(Tick when = (Tick)-1) 77710476Sandreas.hansson@arm.com { 77810476Sandreas.hansson@arm.com eventq->wakeup(when); 77910476Sandreas.hansson@arm.com } 78010476Sandreas.hansson@arm.com 7819356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { eventq->setCurTick(newVal); } 7825605Snate@binkert.org}; 7835605Snate@binkert.org 7847005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7857005Snate@binkert.orgclass EventWrapper : public Event 7865502Snate@binkert.org{ 7877005Snate@binkert.org private: 7887005Snate@binkert.org T *object; 7895502Snate@binkert.org 7907005Snate@binkert.org public: 7917005Snate@binkert.org EventWrapper(T *obj, bool del = false, Priority p = Default_Pri) 7927005Snate@binkert.org : Event(p), object(obj) 7937005Snate@binkert.org { 7947005Snate@binkert.org if (del) 7957005Snate@binkert.org setFlags(AutoDelete); 7967005Snate@binkert.org } 7975502Snate@binkert.org 7987066Snate@binkert.org EventWrapper(T &obj, bool del = false, Priority p = Default_Pri) 7997066Snate@binkert.org : Event(p), object(&obj) 8007066Snate@binkert.org { 8017066Snate@binkert.org if (del) 8027066Snate@binkert.org setFlags(AutoDelete); 8037066Snate@binkert.org } 8047066Snate@binkert.org 8057005Snate@binkert.org void process() { (object->*F)(); } 8065502Snate@binkert.org 8077005Snate@binkert.org const std::string 8087005Snate@binkert.org name() const 
8097005Snate@binkert.org { 8107005Snate@binkert.org return object->name() + ".wrapped_event"; 8117005Snate@binkert.org } 8127005Snate@binkert.org 8137005Snate@binkert.org const char *description() const { return "EventWrapped"; } 8147005Snate@binkert.org}; 8152SN/A 81612082Sspwilson2@wisc.educlass EventFunctionWrapper : public Event 81712082Sspwilson2@wisc.edu{ 81812082Sspwilson2@wisc.edu private: 81912082Sspwilson2@wisc.edu std::function<void(void)> callback; 82012082Sspwilson2@wisc.edu std::string _name; 82112082Sspwilson2@wisc.edu 82212082Sspwilson2@wisc.edu public: 82312082Sspwilson2@wisc.edu EventFunctionWrapper(const std::function<void(void)> &callback, 82412082Sspwilson2@wisc.edu const std::string &name, 82512082Sspwilson2@wisc.edu bool del = false, 82612082Sspwilson2@wisc.edu Priority p = Default_Pri) 82712082Sspwilson2@wisc.edu : Event(p), callback(callback), _name(name) 82812082Sspwilson2@wisc.edu { 82912082Sspwilson2@wisc.edu if (del) 83012082Sspwilson2@wisc.edu setFlags(AutoDelete); 83112082Sspwilson2@wisc.edu } 83212082Sspwilson2@wisc.edu 83312082Sspwilson2@wisc.edu void process() { callback(); } 83412082Sspwilson2@wisc.edu 83512082Sspwilson2@wisc.edu const std::string 83612082Sspwilson2@wisc.edu name() const 83712082Sspwilson2@wisc.edu { 83812082Sspwilson2@wisc.edu return _name + ".wrapped_function_event"; 83912082Sspwilson2@wisc.edu } 84012082Sspwilson2@wisc.edu 84112082Sspwilson2@wisc.edu const char *description() const { return "EventFunctionWrapped"; } 84212082Sspwilson2@wisc.edu}; 84312082Sspwilson2@wisc.edu 8441354SN/A#endif // __SIM_EVENTQ_HH__ 845