// eventq.hh revision 12040
/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
292665Ssaidi@eecs.umich.edu * 302665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 312665Ssaidi@eecs.umich.edu * Nathan Binkert 322SN/A */ 332SN/A 342SN/A/* @file 352SN/A * EventQueue interfaces 362SN/A */ 372SN/A 381354SN/A#ifndef __SIM_EVENTQ_HH__ 391354SN/A#define __SIM_EVENTQ_HH__ 402SN/A 412SN/A#include <algorithm> 425501Snate@binkert.org#include <cassert> 435546Snate@binkert.org#include <climits> 447004Snate@binkert.org#include <iosfwd> 4510412Sandreas.hansson@arm.com#include <memory> 469983Sstever@gmail.com#include <mutex> 472SN/A#include <string> 482SN/A 495769Snate@binkert.org#include "base/flags.hh" 506216Snate@binkert.org#include "base/types.hh" 518232Snate@binkert.org#include "debug/Event.hh" 5256SN/A#include "sim/serialize.hh" 532SN/A 545543Ssaidi@eecs.umich.educlass EventQueue; // forward declaration 559983Sstever@gmail.comclass BaseGlobalEvent; 562SN/A 579983Sstever@gmail.com//! Simulation Quantum for multiple eventq simulation. 589983Sstever@gmail.com//! The quantum value is the period length after which the queues 599983Sstever@gmail.com//! synchronize themselves with each other. This means that any 609983Sstever@gmail.com//! event to scheduled on Queue A which is generated by an event on 619983Sstever@gmail.com//! Queue B should be at least simQuantum ticks away in future. 629983Sstever@gmail.comextern Tick simQuantum; 631354SN/A 649983Sstever@gmail.com//! Current number of allocated main event queues. 659983Sstever@gmail.comextern uint32_t numMainEventQueues; 669983Sstever@gmail.com 679983Sstever@gmail.com//! Array for main event queues. 689983Sstever@gmail.comextern std::vector<EventQueue *> mainEventQueue; 699983Sstever@gmail.com 709983Sstever@gmail.com//! The current event queue for the running thread. Access to this queue 719983Sstever@gmail.com//! does not require any locking from the thread. 729983Sstever@gmail.com 739983Sstever@gmail.comextern __thread EventQueue *_curEventQueue; 749983Sstever@gmail.com 759983Sstever@gmail.com//! 
Current mode of execution: parallel / serial 769983Sstever@gmail.comextern bool inParallelMode; 779983Sstever@gmail.com 789983Sstever@gmail.com//! Function for returning eventq queue for the provided 799983Sstever@gmail.com//! index. The function allocates a new queue in case one 809983Sstever@gmail.com//! does not exist for the index, provided that the index 819983Sstever@gmail.com//! is with in bounds. 829983Sstever@gmail.comEventQueue *getEventQueue(uint32_t index); 839983Sstever@gmail.com 849983Sstever@gmail.cominline EventQueue *curEventQueue() { return _curEventQueue; } 859983Sstever@gmail.cominline void curEventQueue(EventQueue *q) { _curEventQueue = q; } 869983Sstever@gmail.com 879983Sstever@gmail.com/** 889983Sstever@gmail.com * Common base class for Event and GlobalEvent, so they can share flag 899983Sstever@gmail.com * and priority definitions and accessor functions. This class should 909983Sstever@gmail.com * not be used directly. 912SN/A */ 929983Sstever@gmail.comclass EventBase 932SN/A{ 9411320Ssteve.reinhardt@amd.com protected: 958902Sandreas.hansson@arm.com typedef unsigned short FlagsType; 965769Snate@binkert.org typedef ::Flags<FlagsType> Flags; 975769Snate@binkert.org 987059Snate@binkert.org static const FlagsType PublicRead = 0x003f; // public readable flags 997059Snate@binkert.org static const FlagsType PublicWrite = 0x001d; // public writable flags 1007059Snate@binkert.org static const FlagsType Squashed = 0x0001; // has been squashed 1017059Snate@binkert.org static const FlagsType Scheduled = 0x0002; // has been scheduled 10212040Sandreas.sandberg@arm.com static const FlagsType Managed = 0x0004; // Use life cycle manager 10312040Sandreas.sandberg@arm.com static const FlagsType AutoDelete = Managed; // delete after dispatch 10411072Sandreas.sandberg@arm.com /** 10511072Sandreas.sandberg@arm.com * This used to be AutoSerialize. 
This value can't be reused 10611072Sandreas.sandberg@arm.com * without changing the checkpoint version since the flag field 10711072Sandreas.sandberg@arm.com * gets serialized. 10811072Sandreas.sandberg@arm.com */ 10911072Sandreas.sandberg@arm.com static const FlagsType Reserved0 = 0x0008; 1107059Snate@binkert.org static const FlagsType IsExitEvent = 0x0010; // special exit event 1117059Snate@binkert.org static const FlagsType IsMainQueue = 0x0020; // on main event queue 1127059Snate@binkert.org static const FlagsType Initialized = 0x7a40; // somewhat random bits 1137059Snate@binkert.org static const FlagsType InitMask = 0xffc0; // mask for init bits 1147059Snate@binkert.org 1157058Snate@binkert.org public: 1167058Snate@binkert.org typedef int8_t Priority; 1177058Snate@binkert.org 118396SN/A /// Event priorities, to provide tie-breakers for events scheduled 119396SN/A /// at the same cycle. Most events are scheduled at the default 120396SN/A /// priority; these values are used to control events that need to 121396SN/A /// be ordered within a cycle. 1225501Snate@binkert.org 1237058Snate@binkert.org /// Minimum priority 1247058Snate@binkert.org static const Priority Minimum_Pri = SCHAR_MIN; 1253329Sstever@eecs.umich.edu 1267058Snate@binkert.org /// If we enable tracing on a particular cycle, do that as the 1277058Snate@binkert.org /// very first thing so we don't miss any of the events on 1287058Snate@binkert.org /// that cycle (even if we enter the debugger). 1299979Satgutier@umich.edu static const Priority Debug_Enable_Pri = -101; 130396SN/A 1317058Snate@binkert.org /// Breakpoints should happen before anything else (except 1327058Snate@binkert.org /// enabling trace output), so we don't miss any action when 1337058Snate@binkert.org /// debugging. 
1347058Snate@binkert.org static const Priority Debug_Break_Pri = -100; 1353329Sstever@eecs.umich.edu 1367058Snate@binkert.org /// CPU switches schedule the new CPU's tick event for the 1377058Snate@binkert.org /// same cycle (after unscheduling the old CPU's tick event). 1387058Snate@binkert.org /// The switch needs to come before any tick events to make 1397058Snate@binkert.org /// sure we don't tick both CPUs in the same cycle. 1407058Snate@binkert.org static const Priority CPU_Switch_Pri = -31; 141396SN/A 1427058Snate@binkert.org /// For some reason "delayed" inter-cluster writebacks are 1437058Snate@binkert.org /// scheduled before regular writebacks (which have default 1447058Snate@binkert.org /// priority). Steve? 1457058Snate@binkert.org static const Priority Delayed_Writeback_Pri = -1; 146396SN/A 1477058Snate@binkert.org /// Default is zero for historical reasons. 1487058Snate@binkert.org static const Priority Default_Pri = 0; 149396SN/A 15010249Sstephan.diestelhorst@arm.com /// DVFS update event leads to stats dump therefore given a lower priority 15110249Sstephan.diestelhorst@arm.com /// to ensure all relevant states have been updated 15210249Sstephan.diestelhorst@arm.com static const Priority DVFS_Update_Pri = 31; 15310249Sstephan.diestelhorst@arm.com 1547058Snate@binkert.org /// Serailization needs to occur before tick events also, so 1557058Snate@binkert.org /// that a serialize/unserialize is identical to an on-line 1567058Snate@binkert.org /// CPU switch. 1577058Snate@binkert.org static const Priority Serialize_Pri = 32; 158396SN/A 1597058Snate@binkert.org /// CPU ticks must come after other associated CPU events 1607058Snate@binkert.org /// (such as writebacks). 1617058Snate@binkert.org static const Priority CPU_Tick_Pri = 50; 162396SN/A 1637058Snate@binkert.org /// Statistics events (dump, reset, etc.) come after 1647058Snate@binkert.org /// everything else, but before exit. 
1657058Snate@binkert.org static const Priority Stat_Event_Pri = 90; 1664075Sbinkertn@umich.edu 1677058Snate@binkert.org /// Progress events come at the end. 1687058Snate@binkert.org static const Priority Progress_Event_Pri = 95; 1695501Snate@binkert.org 1707058Snate@binkert.org /// If we want to exit on this cycle, it's the very last thing 1717058Snate@binkert.org /// we do. 1727058Snate@binkert.org static const Priority Sim_Exit_Pri = 100; 1737058Snate@binkert.org 1747058Snate@binkert.org /// Maximum priority 1757058Snate@binkert.org static const Priority Maximum_Pri = SCHAR_MAX; 1769983Sstever@gmail.com}; 1779983Sstever@gmail.com 1789983Sstever@gmail.com/* 1799983Sstever@gmail.com * An item on an event queue. The action caused by a given 1809983Sstever@gmail.com * event is specified by deriving a subclass and overriding the 1819983Sstever@gmail.com * process() member function. 1829983Sstever@gmail.com * 1839983Sstever@gmail.com * Caution, the order of members is chosen to maximize data packing. 1849983Sstever@gmail.com */ 1859983Sstever@gmail.comclass Event : public EventBase, public Serializable 1869983Sstever@gmail.com{ 1879983Sstever@gmail.com friend class EventQueue; 1889983Sstever@gmail.com 1899983Sstever@gmail.com private: 1909983Sstever@gmail.com // The event queue is now a linked list of linked lists. The 1919983Sstever@gmail.com // 'nextBin' pointer is to find the bin, where a bin is defined as 1929983Sstever@gmail.com // when+priority. All events in the same bin will be stored in a 1939983Sstever@gmail.com // second linked list (a stack) maintained by the 'nextInBin' 1949983Sstever@gmail.com // pointer. The list will be accessed in LIFO order. The end 1959983Sstever@gmail.com // result is that the insert/removal in 'nextBin' is 1969983Sstever@gmail.com // linear/constant, and the lookup/removal in 'nextInBin' is 1979983Sstever@gmail.com // constant/constant. 
Hopefully this is a significant improvement 1989983Sstever@gmail.com // over the current fully linear insertion. 1999983Sstever@gmail.com Event *nextBin; 2009983Sstever@gmail.com Event *nextInBin; 2019983Sstever@gmail.com 2029983Sstever@gmail.com static Event *insertBefore(Event *event, Event *curr); 2039983Sstever@gmail.com static Event *removeItem(Event *event, Event *last); 2049983Sstever@gmail.com 2059983Sstever@gmail.com Tick _when; //!< timestamp when event should be processed 2069983Sstever@gmail.com Priority _priority; //!< event priority 2079983Sstever@gmail.com Flags flags; 2089983Sstever@gmail.com 2099983Sstever@gmail.com#ifndef NDEBUG 2109983Sstever@gmail.com /// Global counter to generate unique IDs for Event instances 2119983Sstever@gmail.com static Counter instanceCounter; 2129983Sstever@gmail.com 2139983Sstever@gmail.com /// This event's unique ID. We can also use pointer values for 2149983Sstever@gmail.com /// this but they're not consistent across runs making debugging 2159983Sstever@gmail.com /// more difficult. Thus we use a global counter value when 2169983Sstever@gmail.com /// debugging. 
2179983Sstever@gmail.com Counter instance; 2189983Sstever@gmail.com 2199983Sstever@gmail.com /// queue to which this event belongs (though it may or may not be 2209983Sstever@gmail.com /// scheduled on this queue yet) 2219983Sstever@gmail.com EventQueue *queue; 2229983Sstever@gmail.com#endif 2239983Sstever@gmail.com 2249983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2259983Sstever@gmail.com Tick whenCreated; //!< time created 2269983Sstever@gmail.com Tick whenScheduled; //!< time scheduled 2279983Sstever@gmail.com#endif 2289983Sstever@gmail.com 2299983Sstever@gmail.com void 2309983Sstever@gmail.com setWhen(Tick when, EventQueue *q) 2319983Sstever@gmail.com { 2329983Sstever@gmail.com _when = when; 2339983Sstever@gmail.com#ifndef NDEBUG 2349983Sstever@gmail.com queue = q; 2359983Sstever@gmail.com#endif 2369983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2379983Sstever@gmail.com whenScheduled = curTick(); 2389983Sstever@gmail.com#endif 2399983Sstever@gmail.com } 2409983Sstever@gmail.com 2419983Sstever@gmail.com bool 2429983Sstever@gmail.com initialized() const 2439983Sstever@gmail.com { 24410673SAndreas.Sandberg@ARM.com return (flags & InitMask) == Initialized; 2459983Sstever@gmail.com } 2469983Sstever@gmail.com 2479983Sstever@gmail.com protected: 2489983Sstever@gmail.com /// Accessor for flags. 2499983Sstever@gmail.com Flags 2509983Sstever@gmail.com getFlags() const 2519983Sstever@gmail.com { 2529983Sstever@gmail.com return flags & PublicRead; 2539983Sstever@gmail.com } 2549983Sstever@gmail.com 2559983Sstever@gmail.com bool 2569983Sstever@gmail.com isFlagSet(Flags _flags) const 2579983Sstever@gmail.com { 2589983Sstever@gmail.com assert(_flags.noneSet(~PublicRead)); 2599983Sstever@gmail.com return flags.isSet(_flags); 2609983Sstever@gmail.com } 2619983Sstever@gmail.com 2629983Sstever@gmail.com /// Accessor for flags. 
2639983Sstever@gmail.com void 2649983Sstever@gmail.com setFlags(Flags _flags) 2659983Sstever@gmail.com { 2669983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2679983Sstever@gmail.com flags.set(_flags); 2689983Sstever@gmail.com } 2699983Sstever@gmail.com 2709983Sstever@gmail.com void 2719983Sstever@gmail.com clearFlags(Flags _flags) 2729983Sstever@gmail.com { 2739983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2749983Sstever@gmail.com flags.clear(_flags); 2759983Sstever@gmail.com } 2769983Sstever@gmail.com 2779983Sstever@gmail.com void 2789983Sstever@gmail.com clearFlags() 2799983Sstever@gmail.com { 2809983Sstever@gmail.com flags.clear(PublicWrite); 2819983Sstever@gmail.com } 2829983Sstever@gmail.com 2839983Sstever@gmail.com // This function isn't really useful if TRACING_ON is not defined 2849983Sstever@gmail.com virtual void trace(const char *action); //!< trace event activity 2859983Sstever@gmail.com 28612040Sandreas.sandberg@arm.com protected: /* Memory management */ 28712040Sandreas.sandberg@arm.com /** 28812040Sandreas.sandberg@arm.com * @{ 28912040Sandreas.sandberg@arm.com * Memory management hooks for events that have the Managed flag set 29012040Sandreas.sandberg@arm.com * 29112040Sandreas.sandberg@arm.com * Events can use automatic memory management by setting the 29212040Sandreas.sandberg@arm.com * Managed flag. The default implementation automatically deletes 29312040Sandreas.sandberg@arm.com * events once they have been removed from the event queue. This 29412040Sandreas.sandberg@arm.com * typically happens when events are descheduled or have been 29512040Sandreas.sandberg@arm.com * triggered and not rescheduled. 29612040Sandreas.sandberg@arm.com * 29712040Sandreas.sandberg@arm.com * The methods below may be overridden by events that need custom 29812040Sandreas.sandberg@arm.com * memory management. 
For example, events exported to Python need 29912040Sandreas.sandberg@arm.com * to impement reference counting to ensure that the Python 30012040Sandreas.sandberg@arm.com * implementation of the event is kept alive while it lives in the 30112040Sandreas.sandberg@arm.com * event queue. 30212040Sandreas.sandberg@arm.com * 30312040Sandreas.sandberg@arm.com * @note Memory managers are responsible for implementing 30412040Sandreas.sandberg@arm.com * reference counting (by overriding both acquireImpl() and 30512040Sandreas.sandberg@arm.com * releaseImpl()) or checking if an event is no longer scheduled 30612040Sandreas.sandberg@arm.com * in releaseImpl() before deallocating it. 30712040Sandreas.sandberg@arm.com */ 30812040Sandreas.sandberg@arm.com 30912040Sandreas.sandberg@arm.com /** 31012040Sandreas.sandberg@arm.com * Managed event scheduled and being held in the event queue. 31112040Sandreas.sandberg@arm.com */ 31212040Sandreas.sandberg@arm.com void acquire() 31312040Sandreas.sandberg@arm.com { 31412040Sandreas.sandberg@arm.com if (flags.isSet(Event::Managed)) 31512040Sandreas.sandberg@arm.com acquireImpl(); 31612040Sandreas.sandberg@arm.com } 31712040Sandreas.sandberg@arm.com 31812040Sandreas.sandberg@arm.com /** 31912040Sandreas.sandberg@arm.com * Managed event removed from the event queue. 
32012040Sandreas.sandberg@arm.com */ 32112040Sandreas.sandberg@arm.com void release() { 32212040Sandreas.sandberg@arm.com if (flags.isSet(Event::Managed)) 32312040Sandreas.sandberg@arm.com releaseImpl(); 32412040Sandreas.sandberg@arm.com } 32512040Sandreas.sandberg@arm.com 32612040Sandreas.sandberg@arm.com virtual void acquireImpl() {} 32712040Sandreas.sandberg@arm.com 32812040Sandreas.sandberg@arm.com virtual void releaseImpl() { 32912040Sandreas.sandberg@arm.com if (!scheduled()) 33012040Sandreas.sandberg@arm.com delete this; 33112040Sandreas.sandberg@arm.com } 33212040Sandreas.sandberg@arm.com 33312040Sandreas.sandberg@arm.com /** @} */ 33412040Sandreas.sandberg@arm.com 3359983Sstever@gmail.com public: 336396SN/A 3372SN/A /* 3382SN/A * Event constructor 3392SN/A * @param queue that the event gets scheduled on 3402SN/A */ 3418581Ssteve.reinhardt@amd.com Event(Priority p = Default_Pri, Flags f = 0) 34210360Sandreas.hansson@arm.com : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p), 3438581Ssteve.reinhardt@amd.com flags(Initialized | f) 344224SN/A { 3458581Ssteve.reinhardt@amd.com assert(f.noneSet(~PublicWrite)); 3464016Sstever@eecs.umich.edu#ifndef NDEBUG 3475501Snate@binkert.org instance = ++instanceCounter; 3485605Snate@binkert.org queue = NULL; 3495501Snate@binkert.org#endif 3505501Snate@binkert.org#ifdef EVENTQ_DEBUG 3517823Ssteve.reinhardt@amd.com whenCreated = curTick(); 3525501Snate@binkert.org whenScheduled = 0; 3534016Sstever@eecs.umich.edu#endif 354224SN/A } 355224SN/A 3565768Snate@binkert.org virtual ~Event(); 3575768Snate@binkert.org virtual const std::string name() const; 358265SN/A 3595501Snate@binkert.org /// Return a C string describing the event. This string should 3605501Snate@binkert.org /// *not* be dynamically allocated; just a const char array 3615501Snate@binkert.org /// describing the event class. 
3625501Snate@binkert.org virtual const char *description() const; 3635501Snate@binkert.org 3645501Snate@binkert.org /// Dump the current event data 3655501Snate@binkert.org void dump() const; 3665501Snate@binkert.org 3675501Snate@binkert.org public: 3685501Snate@binkert.org /* 3695501Snate@binkert.org * This member function is invoked when the event is processed 3705501Snate@binkert.org * (occurs). There is no default implementation; each subclass 3715501Snate@binkert.org * must provide its own implementation. The event is not 3725501Snate@binkert.org * automatically deleted after it is processed (to allow for 3735501Snate@binkert.org * statically allocated event objects). 3745501Snate@binkert.org * 3755501Snate@binkert.org * If the AutoDestroy flag is set, the object is deleted once it 3765501Snate@binkert.org * is processed. 3775501Snate@binkert.org */ 3785501Snate@binkert.org virtual void process() = 0; 3795501Snate@binkert.org 3802SN/A /// Determine if the current event is scheduled 3815769Snate@binkert.org bool scheduled() const { return flags.isSet(Scheduled); } 3822SN/A 3832SN/A /// Squash the current event 3845769Snate@binkert.org void squash() { flags.set(Squashed); } 3852SN/A 3862SN/A /// Check whether the event is squashed 3875769Snate@binkert.org bool squashed() const { return flags.isSet(Squashed); } 3882SN/A 3892667Sstever@eecs.umich.edu /// See if this is a SimExitEvent (without resorting to RTTI) 3905769Snate@binkert.org bool isExitEvent() const { return flags.isSet(IsExitEvent); } 3912667Sstever@eecs.umich.edu 39210992Stimothy.jones@cl.cam.ac.uk /// Check whether this event will auto-delete 39312040Sandreas.sandberg@arm.com bool isManaged() const { return flags.isSet(Managed); } 39412040Sandreas.sandberg@arm.com bool isAutoDelete() const { return isManaged(); } 39510992Stimothy.jones@cl.cam.ac.uk 3962SN/A /// Get the time that the event is scheduled 3972SN/A Tick when() const { return _when; } 3982SN/A 3992SN/A /// Get the event priority 
4007058Snate@binkert.org Priority priority() const { return _priority; } 4012SN/A 4029983Sstever@gmail.com //! If this is part of a GlobalEvent, return the pointer to the 4039983Sstever@gmail.com //! Global Event. By default, there is no GlobalEvent, so return 4049983Sstever@gmail.com //! NULL. (Overridden in GlobalEvent::BarrierEvent.) 4059983Sstever@gmail.com virtual BaseGlobalEvent *globalEvent() { return NULL; } 4069983Sstever@gmail.com 40711168Sandreas.hansson@arm.com void serialize(CheckpointOut &cp) const override; 40811168Sandreas.hansson@arm.com void unserialize(CheckpointIn &cp) override; 409571SN/A}; 410571SN/A 4117005Snate@binkert.orginline bool 4127005Snate@binkert.orgoperator<(const Event &l, const Event &r) 4137005Snate@binkert.org{ 4147005Snate@binkert.org return l.when() < r.when() || 4157005Snate@binkert.org (l.when() == r.when() && l.priority() < r.priority()); 4167005Snate@binkert.org} 4177005Snate@binkert.org 4187005Snate@binkert.orginline bool 4197005Snate@binkert.orgoperator>(const Event &l, const Event &r) 4207005Snate@binkert.org{ 4217005Snate@binkert.org return l.when() > r.when() || 4227005Snate@binkert.org (l.when() == r.when() && l.priority() > r.priority()); 4237005Snate@binkert.org} 4247005Snate@binkert.org 4257005Snate@binkert.orginline bool 4267005Snate@binkert.orgoperator<=(const Event &l, const Event &r) 4277005Snate@binkert.org{ 4287005Snate@binkert.org return l.when() < r.when() || 4297005Snate@binkert.org (l.when() == r.when() && l.priority() <= r.priority()); 4307005Snate@binkert.org} 4317005Snate@binkert.orginline bool 4327005Snate@binkert.orgoperator>=(const Event &l, const Event &r) 4337005Snate@binkert.org{ 4347005Snate@binkert.org return l.when() > r.when() || 4357005Snate@binkert.org (l.when() == r.when() && l.priority() >= r.priority()); 4367005Snate@binkert.org} 4377005Snate@binkert.org 4387005Snate@binkert.orginline bool 4397005Snate@binkert.orgoperator==(const Event &l, const Event &r) 4407005Snate@binkert.org{ 
4417005Snate@binkert.org return l.when() == r.when() && l.priority() == r.priority(); 4427005Snate@binkert.org} 4437005Snate@binkert.org 4447005Snate@binkert.orginline bool 4457005Snate@binkert.orgoperator!=(const Event &l, const Event &r) 4467005Snate@binkert.org{ 4477005Snate@binkert.org return l.when() != r.when() || l.priority() != r.priority(); 4487005Snate@binkert.org} 4497005Snate@binkert.org 45010153Sandreas@sandberg.pp.se/** 4512SN/A * Queue of events sorted in time order 45210153Sandreas@sandberg.pp.se * 45310153Sandreas@sandberg.pp.se * Events are scheduled (inserted into the event queue) using the 45410153Sandreas@sandberg.pp.se * schedule() method. This method either inserts a <i>synchronous</i> 45510153Sandreas@sandberg.pp.se * or <i>asynchronous</i> event. 45610153Sandreas@sandberg.pp.se * 45710153Sandreas@sandberg.pp.se * Synchronous events are scheduled using schedule() method with the 45810153Sandreas@sandberg.pp.se * argument 'global' set to false (default). This should only be done 45910153Sandreas@sandberg.pp.se * from a thread holding the event queue lock 46010153Sandreas@sandberg.pp.se * (EventQueue::service_mutex). The lock is always held when an event 46110153Sandreas@sandberg.pp.se * handler is called, it can therefore always insert events into its 46210153Sandreas@sandberg.pp.se * own event queue unless it voluntarily releases the lock. 46310153Sandreas@sandberg.pp.se * 46410153Sandreas@sandberg.pp.se * Events can be scheduled across thread (and event queue borders) by 46510153Sandreas@sandberg.pp.se * either scheduling asynchronous events or taking the target event 46610153Sandreas@sandberg.pp.se * queue's lock. However, the lock should <i>never</i> be taken 46710153Sandreas@sandberg.pp.se * directly since this is likely to cause deadlocks. Instead, code 46810153Sandreas@sandberg.pp.se * that needs to schedule events in other event queues should 46910153Sandreas@sandberg.pp.se * temporarily release its own queue and lock the new queue. 
This 47010153Sandreas@sandberg.pp.se * prevents deadlocks since a single thread never owns more than one 47110153Sandreas@sandberg.pp.se * event queue lock. This functionality is provided by the 47210153Sandreas@sandberg.pp.se * ScopedMigration helper class. Note that temporarily migrating 47310153Sandreas@sandberg.pp.se * between event queues can make the simulation non-deterministic, it 47410153Sandreas@sandberg.pp.se * should therefore be limited to cases where that can be tolerated 47510153Sandreas@sandberg.pp.se * (e.g., handling asynchronous IO or fast-forwarding in KVM). 47610153Sandreas@sandberg.pp.se * 47710153Sandreas@sandberg.pp.se * Asynchronous events can also be scheduled using the normal 47810153Sandreas@sandberg.pp.se * schedule() method with the 'global' parameter set to true. Unlike 47910153Sandreas@sandberg.pp.se * the previous queue migration strategy, this strategy is fully 48010153Sandreas@sandberg.pp.se * deterministic. This causes the event to be inserted in a separate 48110153Sandreas@sandberg.pp.se * queue of asynchronous events (async_queue), which is merged main 48210153Sandreas@sandberg.pp.se * event queue at the end of each simulation quantum (by calling the 48310153Sandreas@sandberg.pp.se * handleAsyncInsertions() method). Note that this implies that such 48410153Sandreas@sandberg.pp.se * events must happen at least one simulation quantum into the future, 48510153Sandreas@sandberg.pp.se * otherwise they risk being scheduled in the past by 48610153Sandreas@sandberg.pp.se * handleAsyncInsertions(). 4872SN/A */ 48811072Sandreas.sandberg@arm.comclass EventQueue 4892SN/A{ 4905605Snate@binkert.org private: 491265SN/A std::string objName; 4922SN/A Event *head; 4939356Snilay@cs.wisc.edu Tick _curTick; 4942SN/A 4959983Sstever@gmail.com //! Mutex to protect async queue. 49610412Sandreas.hansson@arm.com std::mutex async_queue_mutex; 4979983Sstever@gmail.com 4989983Sstever@gmail.com //! List of events added by other threads to this event queue. 
4999983Sstever@gmail.com std::list<Event*> async_queue; 5009983Sstever@gmail.com 50110153Sandreas@sandberg.pp.se /** 50210153Sandreas@sandberg.pp.se * Lock protecting event handling. 50310153Sandreas@sandberg.pp.se * 50410153Sandreas@sandberg.pp.se * This lock is always taken when servicing events. It is assumed 50510153Sandreas@sandberg.pp.se * that the thread scheduling new events (not asynchronous events 50610153Sandreas@sandberg.pp.se * though) have taken this lock. This is normally done by 50710153Sandreas@sandberg.pp.se * serviceOne() since new events are typically scheduled as a 50810153Sandreas@sandberg.pp.se * response to an earlier event. 50910153Sandreas@sandberg.pp.se * 51010153Sandreas@sandberg.pp.se * This lock is intended to be used to temporarily steal an event 51110153Sandreas@sandberg.pp.se * queue to support inter-thread communication when some 51210153Sandreas@sandberg.pp.se * deterministic timing can be sacrificed for speed. For example, 51310153Sandreas@sandberg.pp.se * the KVM CPU can use this support to access devices running in a 51410153Sandreas@sandberg.pp.se * different thread. 51510153Sandreas@sandberg.pp.se * 51610153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 51710153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 51810153Sandreas@sandberg.pp.se * @see EventQueue::lock() 51910153Sandreas@sandberg.pp.se * @see EventQueue::unlock() 52010153Sandreas@sandberg.pp.se */ 52110153Sandreas@sandberg.pp.se std::mutex service_mutex; 52210153Sandreas@sandberg.pp.se 5239983Sstever@gmail.com //! Insert / remove event from the queue. Should only be called 5249983Sstever@gmail.com //! by thread operating this queue. 5252SN/A void insert(Event *event); 5262SN/A void remove(Event *event); 5272SN/A 5289983Sstever@gmail.com //! Function for adding events to the async queue. The added events 5299983Sstever@gmail.com //! are added to main event queue later. Threads, other than the 5309983Sstever@gmail.com //! 
owning thread, should call this function instead of insert(). 5319983Sstever@gmail.com void asyncInsert(Event *event); 5329983Sstever@gmail.com 5337063Snate@binkert.org EventQueue(const EventQueue &); 5347063Snate@binkert.org 5352SN/A public: 53610153Sandreas@sandberg.pp.se /** 53710153Sandreas@sandberg.pp.se * Temporarily migrate execution to a different event queue. 53810153Sandreas@sandberg.pp.se * 53910153Sandreas@sandberg.pp.se * An instance of this class temporarily migrates execution to a 54010153Sandreas@sandberg.pp.se * different event queue by releasing the current queue, locking 54110153Sandreas@sandberg.pp.se * the new queue, and updating curEventQueue(). This can, for 54210153Sandreas@sandberg.pp.se * example, be useful when performing IO across thread event 54310153Sandreas@sandberg.pp.se * queues when timing is not crucial (e.g., during fast 54410153Sandreas@sandberg.pp.se * forwarding). 54510153Sandreas@sandberg.pp.se */ 54610153Sandreas@sandberg.pp.se class ScopedMigration 54710153Sandreas@sandberg.pp.se { 54810153Sandreas@sandberg.pp.se public: 54910153Sandreas@sandberg.pp.se ScopedMigration(EventQueue *_new_eq) 55010153Sandreas@sandberg.pp.se : new_eq(*_new_eq), old_eq(*curEventQueue()) 55110153Sandreas@sandberg.pp.se { 55210153Sandreas@sandberg.pp.se old_eq.unlock(); 55310153Sandreas@sandberg.pp.se new_eq.lock(); 55410153Sandreas@sandberg.pp.se curEventQueue(&new_eq); 55510153Sandreas@sandberg.pp.se } 55610153Sandreas@sandberg.pp.se 55710153Sandreas@sandberg.pp.se ~ScopedMigration() 55810153Sandreas@sandberg.pp.se { 55910153Sandreas@sandberg.pp.se new_eq.unlock(); 56010153Sandreas@sandberg.pp.se old_eq.lock(); 56110153Sandreas@sandberg.pp.se curEventQueue(&old_eq); 56210153Sandreas@sandberg.pp.se } 56310153Sandreas@sandberg.pp.se 56410153Sandreas@sandberg.pp.se private: 56510153Sandreas@sandberg.pp.se EventQueue &new_eq; 56610153Sandreas@sandberg.pp.se EventQueue &old_eq; 56710153Sandreas@sandberg.pp.se }; 56810153Sandreas@sandberg.pp.se 
56910153Sandreas@sandberg.pp.se /** 57010153Sandreas@sandberg.pp.se * Temporarily release the event queue service lock. 57110153Sandreas@sandberg.pp.se * 57210153Sandreas@sandberg.pp.se * There are cases where it is desirable to temporarily release 57310153Sandreas@sandberg.pp.se * the event queue lock to prevent deadlocks. For example, when 57410153Sandreas@sandberg.pp.se * waiting on the global barrier, we need to release the lock to 57510153Sandreas@sandberg.pp.se * prevent deadlocks from happening when another thread tries to 57610153Sandreas@sandberg.pp.se * temporarily take over the event queue waiting on the barrier. 57710153Sandreas@sandberg.pp.se */ 57810153Sandreas@sandberg.pp.se class ScopedRelease 57910153Sandreas@sandberg.pp.se { 58010153Sandreas@sandberg.pp.se public: 58110153Sandreas@sandberg.pp.se ScopedRelease(EventQueue *_eq) 58210153Sandreas@sandberg.pp.se : eq(*_eq) 58310153Sandreas@sandberg.pp.se { 58410153Sandreas@sandberg.pp.se eq.unlock(); 58510153Sandreas@sandberg.pp.se } 58610153Sandreas@sandberg.pp.se 58710153Sandreas@sandberg.pp.se ~ScopedRelease() 58810153Sandreas@sandberg.pp.se { 58910153Sandreas@sandberg.pp.se eq.lock(); 59010153Sandreas@sandberg.pp.se } 59110153Sandreas@sandberg.pp.se 59210153Sandreas@sandberg.pp.se private: 59310153Sandreas@sandberg.pp.se EventQueue &eq; 59410153Sandreas@sandberg.pp.se }; 59510153Sandreas@sandberg.pp.se 5967063Snate@binkert.org EventQueue(const std::string &n); 5972SN/A 598512SN/A virtual const std::string name() const { return objName; } 5999983Sstever@gmail.com void name(const std::string &st) { objName = st; } 600265SN/A 6019983Sstever@gmail.com //! Schedule the given event on this queue. Safe to call from any 6029983Sstever@gmail.com //! thread. 6039983Sstever@gmail.com void schedule(Event *event, Tick when, bool global = false); 6049983Sstever@gmail.com 6059983Sstever@gmail.com //! Deschedule the specified event. Should be called only from the 6069983Sstever@gmail.com //! owning thread. 
6075738Snate@binkert.org void deschedule(Event *event); 6089983Sstever@gmail.com 6099983Sstever@gmail.com //! Reschedule the specified event. Should be called only from 6109983Sstever@gmail.com //! the owning thread. 6115738Snate@binkert.org void reschedule(Event *event, Tick when, bool always = false); 6122SN/A 6135501Snate@binkert.org Tick nextTick() const { return head->when(); } 6149356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { _curTick = newVal; } 61511015Sandreas.sandberg@arm.com Tick getCurTick() const { return _curTick; } 61610991Stimothy.jones@cl.cam.ac.uk Event *getHead() const { return head; } 6179356Snilay@cs.wisc.edu 6182667Sstever@eecs.umich.edu Event *serviceOne(); 6192SN/A 6202SN/A // process all events up to the given timestamp. we inline a 6212SN/A // quick test to see if there are any events to process; if so, 6222SN/A // call the internal out-of-line version to process them all. 6235501Snate@binkert.org void 6245501Snate@binkert.org serviceEvents(Tick when) 6255501Snate@binkert.org { 6262SN/A while (!empty()) { 6272SN/A if (nextTick() > when) 6282SN/A break; 6292SN/A 6301634SN/A /** 6311634SN/A * @todo this assert is a good bug catcher. I need to 6321634SN/A * make it true again. 6331634SN/A */ 6341634SN/A //assert(head->when() >= when && "event scheduled in the past"); 6352SN/A serviceOne(); 6362SN/A } 6379356Snilay@cs.wisc.edu 6389356Snilay@cs.wisc.edu setCurTick(when); 6392SN/A } 6402SN/A 6412SN/A // return true if no events are queued 6425501Snate@binkert.org bool empty() const { return head == NULL; } 6432SN/A 6445501Snate@binkert.org void dump() const; 6452SN/A 6465502Snate@binkert.org bool debugVerify() const; 6475502Snate@binkert.org 6489983Sstever@gmail.com //! Function for moving events from the async_queue to the main queue. 
6499983Sstever@gmail.com void handleAsyncInsertions(); 6509983Sstever@gmail.com 6518648Snilay@cs.wisc.edu /** 65210476Sandreas.hansson@arm.com * Function to signal that the event loop should be woken up because 65310476Sandreas.hansson@arm.com * an event has been scheduled by an agent outside the gem5 event 65410476Sandreas.hansson@arm.com * loop(s) whose event insertion may not have been noticed by gem5. 65510476Sandreas.hansson@arm.com * This function isn't needed by the usual gem5 event loop but may 65610476Sandreas.hansson@arm.com * be necessary in derived EventQueues which host gem5 onto other 65710476Sandreas.hansson@arm.com * schedulers. 65810476Sandreas.hansson@arm.com * 65910476Sandreas.hansson@arm.com * @param when Time of a delayed wakeup (if known). This parameter 66010476Sandreas.hansson@arm.com * can be used by an implementation to schedule a wakeup in the 66110476Sandreas.hansson@arm.com * future if it is sure it will remain active until then. 66210476Sandreas.hansson@arm.com * Or it can be ignored and the event queue can be woken up now. 66310476Sandreas.hansson@arm.com */ 66410476Sandreas.hansson@arm.com virtual void wakeup(Tick when = (Tick)-1) { } 66510476Sandreas.hansson@arm.com 66610476Sandreas.hansson@arm.com /** 6678648Snilay@cs.wisc.edu * function for replacing the head of the event queue, so that a 6688648Snilay@cs.wisc.edu * different set of events can run without disturbing events that have 6698648Snilay@cs.wisc.edu * already been scheduled. Already scheduled events can be processed 6708648Snilay@cs.wisc.edu * by replacing the original head back. 6718648Snilay@cs.wisc.edu * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR. 6728648Snilay@cs.wisc.edu * NOT RECOMMENDED FOR USE. 
6738648Snilay@cs.wisc.edu */ 6748648Snilay@cs.wisc.edu Event* replaceHead(Event* s); 6758648Snilay@cs.wisc.edu 67610153Sandreas@sandberg.pp.se /**@{*/ 67710153Sandreas@sandberg.pp.se /** 67810153Sandreas@sandberg.pp.se * Provide an interface for locking/unlocking the event queue. 67910153Sandreas@sandberg.pp.se * 68010153Sandreas@sandberg.pp.se * @warn Do NOT use these methods directly unless you really know 68110153Sandreas@sandberg.pp.se * what you are doing. Incorrect use can easily lead to simulator 68210153Sandreas@sandberg.pp.se * deadlocks. 68310153Sandreas@sandberg.pp.se * 68410153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 68510153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 68610153Sandreas@sandberg.pp.se * @see EventQueue 68710153Sandreas@sandberg.pp.se */ 68810153Sandreas@sandberg.pp.se void lock() { service_mutex.lock(); } 68910153Sandreas@sandberg.pp.se void unlock() { service_mutex.unlock(); } 69010153Sandreas@sandberg.pp.se /**@}*/ 69110153Sandreas@sandberg.pp.se 69210906Sandreas.sandberg@arm.com /** 69310906Sandreas.sandberg@arm.com * Reschedule an event after a checkpoint. 69410906Sandreas.sandberg@arm.com * 69510906Sandreas.sandberg@arm.com * Since events don't know which event queue they belong to, 69610906Sandreas.sandberg@arm.com * parent objects need to reschedule events themselves. This 69710906Sandreas.sandberg@arm.com * method conditionally schedules an event that has the Scheduled 69810906Sandreas.sandberg@arm.com * flag set. It should be called by parent objects after 69910906Sandreas.sandberg@arm.com * unserializing an object. 70010906Sandreas.sandberg@arm.com * 70110906Sandreas.sandberg@arm.com * @warn Only use this method after unserializing an Event. 
70210906Sandreas.sandberg@arm.com */ 70310906Sandreas.sandberg@arm.com void checkpointReschedule(Event *event); 70410906Sandreas.sandberg@arm.com 70510476Sandreas.hansson@arm.com virtual ~EventQueue() { } 7062SN/A}; 7072SN/A 7089554Sandreas.hansson@arm.comvoid dumpMainQueue(); 7099554Sandreas.hansson@arm.com 7105605Snate@binkert.orgclass EventManager 7115605Snate@binkert.org{ 7125605Snate@binkert.org protected: 7135605Snate@binkert.org /** A pointer to this object's event queue */ 7145605Snate@binkert.org EventQueue *eventq; 7152SN/A 7165605Snate@binkert.org public: 7179099Sandreas.hansson@arm.com EventManager(EventManager &em) : eventq(em.eventq) {} 7189159Sandreas.hansson@arm.com EventManager(EventManager *em) : eventq(em->eventq) {} 7195605Snate@binkert.org EventManager(EventQueue *eq) : eventq(eq) {} 7202SN/A 7215605Snate@binkert.org EventQueue * 7229099Sandreas.hansson@arm.com eventQueue() const 7237060Snate@binkert.org { 7247060Snate@binkert.org return eventq; 7257060Snate@binkert.org } 7267060Snate@binkert.org 7275605Snate@binkert.org void 7285605Snate@binkert.org schedule(Event &event, Tick when) 7295605Snate@binkert.org { 7305605Snate@binkert.org eventq->schedule(&event, when); 7315605Snate@binkert.org } 7325605Snate@binkert.org 7335605Snate@binkert.org void 7345605Snate@binkert.org deschedule(Event &event) 7355605Snate@binkert.org { 7365605Snate@binkert.org eventq->deschedule(&event); 7375605Snate@binkert.org } 7385605Snate@binkert.org 7395605Snate@binkert.org void 7405605Snate@binkert.org reschedule(Event &event, Tick when, bool always = false) 7415605Snate@binkert.org { 7425605Snate@binkert.org eventq->reschedule(&event, when, always); 7435605Snate@binkert.org } 7445605Snate@binkert.org 7455605Snate@binkert.org void 7465605Snate@binkert.org schedule(Event *event, Tick when) 7475605Snate@binkert.org { 7485605Snate@binkert.org eventq->schedule(event, when); 7495605Snate@binkert.org } 7505605Snate@binkert.org 7515605Snate@binkert.org void 
7525605Snate@binkert.org deschedule(Event *event) 7535605Snate@binkert.org { 7545605Snate@binkert.org eventq->deschedule(event); 7555605Snate@binkert.org } 7565605Snate@binkert.org 7575605Snate@binkert.org void 7585605Snate@binkert.org reschedule(Event *event, Tick when, bool always = false) 7595605Snate@binkert.org { 7605605Snate@binkert.org eventq->reschedule(event, when, always); 7615605Snate@binkert.org } 7629356Snilay@cs.wisc.edu 76310476Sandreas.hansson@arm.com void wakeupEventQueue(Tick when = (Tick)-1) 76410476Sandreas.hansson@arm.com { 76510476Sandreas.hansson@arm.com eventq->wakeup(when); 76610476Sandreas.hansson@arm.com } 76710476Sandreas.hansson@arm.com 7689356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { eventq->setCurTick(newVal); } 7695605Snate@binkert.org}; 7705605Snate@binkert.org 7717005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7727005Snate@binkert.orgvoid 7737005Snate@binkert.orgDelayFunction(EventQueue *eventq, Tick when, T *object) 7745502Snate@binkert.org{ 7757005Snate@binkert.org class DelayEvent : public Event 7767005Snate@binkert.org { 7777005Snate@binkert.org private: 7787005Snate@binkert.org T *object; 7797005Snate@binkert.org 7807005Snate@binkert.org public: 7817005Snate@binkert.org DelayEvent(T *o) 7828581Ssteve.reinhardt@amd.com : Event(Default_Pri, AutoDelete), object(o) 7838581Ssteve.reinhardt@amd.com { } 7847005Snate@binkert.org void process() { (object->*F)(); } 7857005Snate@binkert.org const char *description() const { return "delay"; } 7867005Snate@binkert.org }; 7877005Snate@binkert.org 7887005Snate@binkert.org eventq->schedule(new DelayEvent(object), when); 7895502Snate@binkert.org} 7905502Snate@binkert.org 7917005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7927005Snate@binkert.orgclass EventWrapper : public Event 7935502Snate@binkert.org{ 7947005Snate@binkert.org private: 7957005Snate@binkert.org T *object; 7965502Snate@binkert.org 7977005Snate@binkert.org public: 7987005Snate@binkert.org EventWrapper(T 
*obj, bool del = false, Priority p = Default_Pri) 7997005Snate@binkert.org : Event(p), object(obj) 8007005Snate@binkert.org { 8017005Snate@binkert.org if (del) 8027005Snate@binkert.org setFlags(AutoDelete); 8037005Snate@binkert.org } 8045502Snate@binkert.org 8057066Snate@binkert.org EventWrapper(T &obj, bool del = false, Priority p = Default_Pri) 8067066Snate@binkert.org : Event(p), object(&obj) 8077066Snate@binkert.org { 8087066Snate@binkert.org if (del) 8097066Snate@binkert.org setFlags(AutoDelete); 8107066Snate@binkert.org } 8117066Snate@binkert.org 8127005Snate@binkert.org void process() { (object->*F)(); } 8135502Snate@binkert.org 8147005Snate@binkert.org const std::string 8157005Snate@binkert.org name() const 8167005Snate@binkert.org { 8177005Snate@binkert.org return object->name() + ".wrapped_event"; 8187005Snate@binkert.org } 8197005Snate@binkert.org 8207005Snate@binkert.org const char *description() const { return "EventWrapped"; } 8217005Snate@binkert.org}; 8222SN/A 8231354SN/A#endif // __SIM_EVENTQ_HH__ 824