eventq.hh revision 10991
/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
292665Ssaidi@eecs.umich.edu * 302665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 312665Ssaidi@eecs.umich.edu * Nathan Binkert 322SN/A */ 332SN/A 342SN/A/* @file 352SN/A * EventQueue interfaces 362SN/A */ 372SN/A 381354SN/A#ifndef __SIM_EVENTQ_HH__ 391354SN/A#define __SIM_EVENTQ_HH__ 402SN/A 412SN/A#include <algorithm> 425501Snate@binkert.org#include <cassert> 435546Snate@binkert.org#include <climits> 447004Snate@binkert.org#include <iosfwd> 4510412Sandreas.hansson@arm.com#include <memory> 469983Sstever@gmail.com#include <mutex> 472SN/A#include <string> 482SN/A 495769Snate@binkert.org#include "base/flags.hh" 502361SN/A#include "base/misc.hh" 516216Snate@binkert.org#include "base/types.hh" 528232Snate@binkert.org#include "debug/Event.hh" 5356SN/A#include "sim/serialize.hh" 542SN/A 555543Ssaidi@eecs.umich.educlass EventQueue; // forward declaration 569983Sstever@gmail.comclass BaseGlobalEvent; 572SN/A 589983Sstever@gmail.com//! Simulation Quantum for multiple eventq simulation. 599983Sstever@gmail.com//! The quantum value is the period length after which the queues 609983Sstever@gmail.com//! synchronize themselves with each other. This means that any 619983Sstever@gmail.com//! event to scheduled on Queue A which is generated by an event on 629983Sstever@gmail.com//! Queue B should be at least simQuantum ticks away in future. 639983Sstever@gmail.comextern Tick simQuantum; 641354SN/A 659983Sstever@gmail.com//! Current number of allocated main event queues. 669983Sstever@gmail.comextern uint32_t numMainEventQueues; 679983Sstever@gmail.com 689983Sstever@gmail.com//! Array for main event queues. 699983Sstever@gmail.comextern std::vector<EventQueue *> mainEventQueue; 709983Sstever@gmail.com 719983Sstever@gmail.com#ifndef SWIG 729983Sstever@gmail.com//! The current event queue for the running thread. Access to this queue 739983Sstever@gmail.com//! does not require any locking from the thread. 
749983Sstever@gmail.com 759983Sstever@gmail.comextern __thread EventQueue *_curEventQueue; 769983Sstever@gmail.com 779983Sstever@gmail.com#endif 789983Sstever@gmail.com 799983Sstever@gmail.com//! Current mode of execution: parallel / serial 809983Sstever@gmail.comextern bool inParallelMode; 819983Sstever@gmail.com 829983Sstever@gmail.com//! Function for returning eventq queue for the provided 839983Sstever@gmail.com//! index. The function allocates a new queue in case one 849983Sstever@gmail.com//! does not exist for the index, provided that the index 859983Sstever@gmail.com//! is with in bounds. 869983Sstever@gmail.comEventQueue *getEventQueue(uint32_t index); 879983Sstever@gmail.com 889983Sstever@gmail.cominline EventQueue *curEventQueue() { return _curEventQueue; } 899983Sstever@gmail.cominline void curEventQueue(EventQueue *q) { _curEventQueue = q; } 909983Sstever@gmail.com 919983Sstever@gmail.com/** 929983Sstever@gmail.com * Common base class for Event and GlobalEvent, so they can share flag 939983Sstever@gmail.com * and priority definitions and accessor functions. This class should 949983Sstever@gmail.com * not be used directly. 
952SN/A */ 969983Sstever@gmail.comclass EventBase 972SN/A{ 985769Snate@binkert.org protected: 998902Sandreas.hansson@arm.com typedef unsigned short FlagsType; 1005769Snate@binkert.org typedef ::Flags<FlagsType> Flags; 1015769Snate@binkert.org 1027059Snate@binkert.org static const FlagsType PublicRead = 0x003f; // public readable flags 1037059Snate@binkert.org static const FlagsType PublicWrite = 0x001d; // public writable flags 1047059Snate@binkert.org static const FlagsType Squashed = 0x0001; // has been squashed 1057059Snate@binkert.org static const FlagsType Scheduled = 0x0002; // has been scheduled 1067059Snate@binkert.org static const FlagsType AutoDelete = 0x0004; // delete after dispatch 1077059Snate@binkert.org static const FlagsType AutoSerialize = 0x0008; // must be serialized 1087059Snate@binkert.org static const FlagsType IsExitEvent = 0x0010; // special exit event 1097059Snate@binkert.org static const FlagsType IsMainQueue = 0x0020; // on main event queue 1107059Snate@binkert.org static const FlagsType Initialized = 0x7a40; // somewhat random bits 1117059Snate@binkert.org static const FlagsType InitMask = 0xffc0; // mask for init bits 1127059Snate@binkert.org 1137058Snate@binkert.org public: 1147058Snate@binkert.org typedef int8_t Priority; 1157058Snate@binkert.org 116396SN/A /// Event priorities, to provide tie-breakers for events scheduled 117396SN/A /// at the same cycle. Most events are scheduled at the default 118396SN/A /// priority; these values are used to control events that need to 119396SN/A /// be ordered within a cycle. 1205501Snate@binkert.org 1217058Snate@binkert.org /// Minimum priority 1227058Snate@binkert.org static const Priority Minimum_Pri = SCHAR_MIN; 1233329Sstever@eecs.umich.edu 1247058Snate@binkert.org /// If we enable tracing on a particular cycle, do that as the 1257058Snate@binkert.org /// very first thing so we don't miss any of the events on 1267058Snate@binkert.org /// that cycle (even if we enter the debugger). 
1279979Satgutier@umich.edu static const Priority Debug_Enable_Pri = -101; 128396SN/A 1297058Snate@binkert.org /// Breakpoints should happen before anything else (except 1307058Snate@binkert.org /// enabling trace output), so we don't miss any action when 1317058Snate@binkert.org /// debugging. 1327058Snate@binkert.org static const Priority Debug_Break_Pri = -100; 1333329Sstever@eecs.umich.edu 1347058Snate@binkert.org /// CPU switches schedule the new CPU's tick event for the 1357058Snate@binkert.org /// same cycle (after unscheduling the old CPU's tick event). 1367058Snate@binkert.org /// The switch needs to come before any tick events to make 1377058Snate@binkert.org /// sure we don't tick both CPUs in the same cycle. 1387058Snate@binkert.org static const Priority CPU_Switch_Pri = -31; 139396SN/A 1407058Snate@binkert.org /// For some reason "delayed" inter-cluster writebacks are 1417058Snate@binkert.org /// scheduled before regular writebacks (which have default 1427058Snate@binkert.org /// priority). Steve? 1437058Snate@binkert.org static const Priority Delayed_Writeback_Pri = -1; 144396SN/A 1457058Snate@binkert.org /// Default is zero for historical reasons. 1467058Snate@binkert.org static const Priority Default_Pri = 0; 147396SN/A 14810249Sstephan.diestelhorst@arm.com /// DVFS update event leads to stats dump therefore given a lower priority 14910249Sstephan.diestelhorst@arm.com /// to ensure all relevant states have been updated 15010249Sstephan.diestelhorst@arm.com static const Priority DVFS_Update_Pri = 31; 15110249Sstephan.diestelhorst@arm.com 1527058Snate@binkert.org /// Serailization needs to occur before tick events also, so 1537058Snate@binkert.org /// that a serialize/unserialize is identical to an on-line 1547058Snate@binkert.org /// CPU switch. 
1557058Snate@binkert.org static const Priority Serialize_Pri = 32; 156396SN/A 1577058Snate@binkert.org /// CPU ticks must come after other associated CPU events 1587058Snate@binkert.org /// (such as writebacks). 1597058Snate@binkert.org static const Priority CPU_Tick_Pri = 50; 160396SN/A 1617058Snate@binkert.org /// Statistics events (dump, reset, etc.) come after 1627058Snate@binkert.org /// everything else, but before exit. 1637058Snate@binkert.org static const Priority Stat_Event_Pri = 90; 1644075Sbinkertn@umich.edu 1657058Snate@binkert.org /// Progress events come at the end. 1667058Snate@binkert.org static const Priority Progress_Event_Pri = 95; 1675501Snate@binkert.org 1687058Snate@binkert.org /// If we want to exit on this cycle, it's the very last thing 1697058Snate@binkert.org /// we do. 1707058Snate@binkert.org static const Priority Sim_Exit_Pri = 100; 1717058Snate@binkert.org 1727058Snate@binkert.org /// Maximum priority 1737058Snate@binkert.org static const Priority Maximum_Pri = SCHAR_MAX; 1749983Sstever@gmail.com}; 1759983Sstever@gmail.com 1769983Sstever@gmail.com/* 1779983Sstever@gmail.com * An item on an event queue. The action caused by a given 1789983Sstever@gmail.com * event is specified by deriving a subclass and overriding the 1799983Sstever@gmail.com * process() member function. 1809983Sstever@gmail.com * 1819983Sstever@gmail.com * Caution, the order of members is chosen to maximize data packing. 1829983Sstever@gmail.com */ 1839983Sstever@gmail.comclass Event : public EventBase, public Serializable 1849983Sstever@gmail.com{ 1859983Sstever@gmail.com friend class EventQueue; 1869983Sstever@gmail.com 1879983Sstever@gmail.com private: 1889983Sstever@gmail.com // The event queue is now a linked list of linked lists. The 1899983Sstever@gmail.com // 'nextBin' pointer is to find the bin, where a bin is defined as 1909983Sstever@gmail.com // when+priority. 
All events in the same bin will be stored in a 1919983Sstever@gmail.com // second linked list (a stack) maintained by the 'nextInBin' 1929983Sstever@gmail.com // pointer. The list will be accessed in LIFO order. The end 1939983Sstever@gmail.com // result is that the insert/removal in 'nextBin' is 1949983Sstever@gmail.com // linear/constant, and the lookup/removal in 'nextInBin' is 1959983Sstever@gmail.com // constant/constant. Hopefully this is a significant improvement 1969983Sstever@gmail.com // over the current fully linear insertion. 1979983Sstever@gmail.com Event *nextBin; 1989983Sstever@gmail.com Event *nextInBin; 1999983Sstever@gmail.com 2009983Sstever@gmail.com static Event *insertBefore(Event *event, Event *curr); 2019983Sstever@gmail.com static Event *removeItem(Event *event, Event *last); 2029983Sstever@gmail.com 2039983Sstever@gmail.com Tick _when; //!< timestamp when event should be processed 2049983Sstever@gmail.com Priority _priority; //!< event priority 2059983Sstever@gmail.com Flags flags; 2069983Sstever@gmail.com 2079983Sstever@gmail.com#ifndef NDEBUG 2089983Sstever@gmail.com /// Global counter to generate unique IDs for Event instances 2099983Sstever@gmail.com static Counter instanceCounter; 2109983Sstever@gmail.com 2119983Sstever@gmail.com /// This event's unique ID. We can also use pointer values for 2129983Sstever@gmail.com /// this but they're not consistent across runs making debugging 2139983Sstever@gmail.com /// more difficult. Thus we use a global counter value when 2149983Sstever@gmail.com /// debugging. 
2159983Sstever@gmail.com Counter instance; 2169983Sstever@gmail.com 2179983Sstever@gmail.com /// queue to which this event belongs (though it may or may not be 2189983Sstever@gmail.com /// scheduled on this queue yet) 2199983Sstever@gmail.com EventQueue *queue; 2209983Sstever@gmail.com#endif 2219983Sstever@gmail.com 2229983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2239983Sstever@gmail.com Tick whenCreated; //!< time created 2249983Sstever@gmail.com Tick whenScheduled; //!< time scheduled 2259983Sstever@gmail.com#endif 2269983Sstever@gmail.com 2279983Sstever@gmail.com void 2289983Sstever@gmail.com setWhen(Tick when, EventQueue *q) 2299983Sstever@gmail.com { 2309983Sstever@gmail.com _when = when; 2319983Sstever@gmail.com#ifndef NDEBUG 2329983Sstever@gmail.com queue = q; 2339983Sstever@gmail.com#endif 2349983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2359983Sstever@gmail.com whenScheduled = curTick(); 2369983Sstever@gmail.com#endif 2379983Sstever@gmail.com } 2389983Sstever@gmail.com 2399983Sstever@gmail.com bool 2409983Sstever@gmail.com initialized() const 2419983Sstever@gmail.com { 24210673SAndreas.Sandberg@ARM.com return (flags & InitMask) == Initialized; 2439983Sstever@gmail.com } 2449983Sstever@gmail.com 2459983Sstever@gmail.com protected: 2469983Sstever@gmail.com /// Accessor for flags. 2479983Sstever@gmail.com Flags 2489983Sstever@gmail.com getFlags() const 2499983Sstever@gmail.com { 2509983Sstever@gmail.com return flags & PublicRead; 2519983Sstever@gmail.com } 2529983Sstever@gmail.com 2539983Sstever@gmail.com bool 2549983Sstever@gmail.com isFlagSet(Flags _flags) const 2559983Sstever@gmail.com { 2569983Sstever@gmail.com assert(_flags.noneSet(~PublicRead)); 2579983Sstever@gmail.com return flags.isSet(_flags); 2589983Sstever@gmail.com } 2599983Sstever@gmail.com 2609983Sstever@gmail.com /// Accessor for flags. 
2619983Sstever@gmail.com void 2629983Sstever@gmail.com setFlags(Flags _flags) 2639983Sstever@gmail.com { 2649983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2659983Sstever@gmail.com flags.set(_flags); 2669983Sstever@gmail.com } 2679983Sstever@gmail.com 2689983Sstever@gmail.com void 2699983Sstever@gmail.com clearFlags(Flags _flags) 2709983Sstever@gmail.com { 2719983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2729983Sstever@gmail.com flags.clear(_flags); 2739983Sstever@gmail.com } 2749983Sstever@gmail.com 2759983Sstever@gmail.com void 2769983Sstever@gmail.com clearFlags() 2779983Sstever@gmail.com { 2789983Sstever@gmail.com flags.clear(PublicWrite); 2799983Sstever@gmail.com } 2809983Sstever@gmail.com 2819983Sstever@gmail.com // This function isn't really useful if TRACING_ON is not defined 2829983Sstever@gmail.com virtual void trace(const char *action); //!< trace event activity 2839983Sstever@gmail.com 2849983Sstever@gmail.com public: 285396SN/A 2862SN/A /* 2872SN/A * Event constructor 2882SN/A * @param queue that the event gets scheduled on 2892SN/A */ 2908581Ssteve.reinhardt@amd.com Event(Priority p = Default_Pri, Flags f = 0) 29110360Sandreas.hansson@arm.com : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p), 2928581Ssteve.reinhardt@amd.com flags(Initialized | f) 293224SN/A { 2948581Ssteve.reinhardt@amd.com assert(f.noneSet(~PublicWrite)); 2954016Sstever@eecs.umich.edu#ifndef NDEBUG 2965501Snate@binkert.org instance = ++instanceCounter; 2975605Snate@binkert.org queue = NULL; 2985501Snate@binkert.org#endif 2995501Snate@binkert.org#ifdef EVENTQ_DEBUG 3007823Ssteve.reinhardt@amd.com whenCreated = curTick(); 3015501Snate@binkert.org whenScheduled = 0; 3024016Sstever@eecs.umich.edu#endif 303224SN/A } 304224SN/A 3055768Snate@binkert.org virtual ~Event(); 3065768Snate@binkert.org virtual const std::string name() const; 307265SN/A 3085501Snate@binkert.org /// Return a C string describing the event. 
This string should 3095501Snate@binkert.org /// *not* be dynamically allocated; just a const char array 3105501Snate@binkert.org /// describing the event class. 3115501Snate@binkert.org virtual const char *description() const; 3125501Snate@binkert.org 3135501Snate@binkert.org /// Dump the current event data 3145501Snate@binkert.org void dump() const; 3155501Snate@binkert.org 3165501Snate@binkert.org public: 3175501Snate@binkert.org /* 3185501Snate@binkert.org * This member function is invoked when the event is processed 3195501Snate@binkert.org * (occurs). There is no default implementation; each subclass 3205501Snate@binkert.org * must provide its own implementation. The event is not 3215501Snate@binkert.org * automatically deleted after it is processed (to allow for 3225501Snate@binkert.org * statically allocated event objects). 3235501Snate@binkert.org * 3245501Snate@binkert.org * If the AutoDestroy flag is set, the object is deleted once it 3255501Snate@binkert.org * is processed. 3265501Snate@binkert.org */ 3275501Snate@binkert.org virtual void process() = 0; 3285501Snate@binkert.org 3292SN/A /// Determine if the current event is scheduled 3305769Snate@binkert.org bool scheduled() const { return flags.isSet(Scheduled); } 3312SN/A 3322SN/A /// Squash the current event 3335769Snate@binkert.org void squash() { flags.set(Squashed); } 3342SN/A 3352SN/A /// Check whether the event is squashed 3365769Snate@binkert.org bool squashed() const { return flags.isSet(Squashed); } 3372SN/A 3382667Sstever@eecs.umich.edu /// See if this is a SimExitEvent (without resorting to RTTI) 3395769Snate@binkert.org bool isExitEvent() const { return flags.isSet(IsExitEvent); } 3402667Sstever@eecs.umich.edu 3412SN/A /// Get the time that the event is scheduled 3422SN/A Tick when() const { return _when; } 3432SN/A 3442SN/A /// Get the event priority 3457058Snate@binkert.org Priority priority() const { return _priority; } 3462SN/A 3479983Sstever@gmail.com //! 
If this is part of a GlobalEvent, return the pointer to the 3489983Sstever@gmail.com //! Global Event. By default, there is no GlobalEvent, so return 3499983Sstever@gmail.com //! NULL. (Overridden in GlobalEvent::BarrierEvent.) 3509983Sstever@gmail.com virtual BaseGlobalEvent *globalEvent() { return NULL; } 3519983Sstever@gmail.com 3525605Snate@binkert.org#ifndef SWIG 35310905Sandreas.sandberg@arm.com void serialize(CheckpointOut &cp) const M5_ATTR_OVERRIDE; 35410905Sandreas.sandberg@arm.com void unserialize(CheckpointIn &cp) M5_ATTR_OVERRIDE; 3555605Snate@binkert.org#endif 356571SN/A}; 357571SN/A 3587005Snate@binkert.org#ifndef SWIG 3597005Snate@binkert.orginline bool 3607005Snate@binkert.orgoperator<(const Event &l, const Event &r) 3617005Snate@binkert.org{ 3627005Snate@binkert.org return l.when() < r.when() || 3637005Snate@binkert.org (l.when() == r.when() && l.priority() < r.priority()); 3647005Snate@binkert.org} 3657005Snate@binkert.org 3667005Snate@binkert.orginline bool 3677005Snate@binkert.orgoperator>(const Event &l, const Event &r) 3687005Snate@binkert.org{ 3697005Snate@binkert.org return l.when() > r.when() || 3707005Snate@binkert.org (l.when() == r.when() && l.priority() > r.priority()); 3717005Snate@binkert.org} 3727005Snate@binkert.org 3737005Snate@binkert.orginline bool 3747005Snate@binkert.orgoperator<=(const Event &l, const Event &r) 3757005Snate@binkert.org{ 3767005Snate@binkert.org return l.when() < r.when() || 3777005Snate@binkert.org (l.when() == r.when() && l.priority() <= r.priority()); 3787005Snate@binkert.org} 3797005Snate@binkert.orginline bool 3807005Snate@binkert.orgoperator>=(const Event &l, const Event &r) 3817005Snate@binkert.org{ 3827005Snate@binkert.org return l.when() > r.when() || 3837005Snate@binkert.org (l.when() == r.when() && l.priority() >= r.priority()); 3847005Snate@binkert.org} 3857005Snate@binkert.org 3867005Snate@binkert.orginline bool 3877005Snate@binkert.orgoperator==(const Event &l, const Event &r) 
3887005Snate@binkert.org{ 3897005Snate@binkert.org return l.when() == r.when() && l.priority() == r.priority(); 3907005Snate@binkert.org} 3917005Snate@binkert.org 3927005Snate@binkert.orginline bool 3937005Snate@binkert.orgoperator!=(const Event &l, const Event &r) 3947005Snate@binkert.org{ 3957005Snate@binkert.org return l.when() != r.when() || l.priority() != r.priority(); 3967005Snate@binkert.org} 3977005Snate@binkert.org#endif 3987005Snate@binkert.org 39910153Sandreas@sandberg.pp.se/** 4002SN/A * Queue of events sorted in time order 40110153Sandreas@sandberg.pp.se * 40210153Sandreas@sandberg.pp.se * Events are scheduled (inserted into the event queue) using the 40310153Sandreas@sandberg.pp.se * schedule() method. This method either inserts a <i>synchronous</i> 40410153Sandreas@sandberg.pp.se * or <i>asynchronous</i> event. 40510153Sandreas@sandberg.pp.se * 40610153Sandreas@sandberg.pp.se * Synchronous events are scheduled using schedule() method with the 40710153Sandreas@sandberg.pp.se * argument 'global' set to false (default). This should only be done 40810153Sandreas@sandberg.pp.se * from a thread holding the event queue lock 40910153Sandreas@sandberg.pp.se * (EventQueue::service_mutex). The lock is always held when an event 41010153Sandreas@sandberg.pp.se * handler is called, it can therefore always insert events into its 41110153Sandreas@sandberg.pp.se * own event queue unless it voluntarily releases the lock. 41210153Sandreas@sandberg.pp.se * 41310153Sandreas@sandberg.pp.se * Events can be scheduled across thread (and event queue borders) by 41410153Sandreas@sandberg.pp.se * either scheduling asynchronous events or taking the target event 41510153Sandreas@sandberg.pp.se * queue's lock. However, the lock should <i>never</i> be taken 41610153Sandreas@sandberg.pp.se * directly since this is likely to cause deadlocks. 
Instead, code 41710153Sandreas@sandberg.pp.se * that needs to schedule events in other event queues should 41810153Sandreas@sandberg.pp.se * temporarily release its own queue and lock the new queue. This 41910153Sandreas@sandberg.pp.se * prevents deadlocks since a single thread never owns more than one 42010153Sandreas@sandberg.pp.se * event queue lock. This functionality is provided by the 42110153Sandreas@sandberg.pp.se * ScopedMigration helper class. Note that temporarily migrating 42210153Sandreas@sandberg.pp.se * between event queues can make the simulation non-deterministic, it 42310153Sandreas@sandberg.pp.se * should therefore be limited to cases where that can be tolerated 42410153Sandreas@sandberg.pp.se * (e.g., handling asynchronous IO or fast-forwarding in KVM). 42510153Sandreas@sandberg.pp.se * 42610153Sandreas@sandberg.pp.se * Asynchronous events can also be scheduled using the normal 42710153Sandreas@sandberg.pp.se * schedule() method with the 'global' parameter set to true. Unlike 42810153Sandreas@sandberg.pp.se * the previous queue migration strategy, this strategy is fully 42910153Sandreas@sandberg.pp.se * deterministic. This causes the event to be inserted in a separate 43010153Sandreas@sandberg.pp.se * queue of asynchronous events (async_queue), which is merged main 43110153Sandreas@sandberg.pp.se * event queue at the end of each simulation quantum (by calling the 43210153Sandreas@sandberg.pp.se * handleAsyncInsertions() method). Note that this implies that such 43310153Sandreas@sandberg.pp.se * events must happen at least one simulation quantum into the future, 43410153Sandreas@sandberg.pp.se * otherwise they risk being scheduled in the past by 43510153Sandreas@sandberg.pp.se * handleAsyncInsertions(). 4362SN/A */ 437395SN/Aclass EventQueue : public Serializable 4382SN/A{ 4395605Snate@binkert.org private: 440265SN/A std::string objName; 4412SN/A Event *head; 4429356Snilay@cs.wisc.edu Tick _curTick; 4432SN/A 4449983Sstever@gmail.com //! 
Mutex to protect async queue. 44510412Sandreas.hansson@arm.com std::mutex async_queue_mutex; 4469983Sstever@gmail.com 4479983Sstever@gmail.com //! List of events added by other threads to this event queue. 4489983Sstever@gmail.com std::list<Event*> async_queue; 4499983Sstever@gmail.com 45010153Sandreas@sandberg.pp.se /** 45110153Sandreas@sandberg.pp.se * Lock protecting event handling. 45210153Sandreas@sandberg.pp.se * 45310153Sandreas@sandberg.pp.se * This lock is always taken when servicing events. It is assumed 45410153Sandreas@sandberg.pp.se * that the thread scheduling new events (not asynchronous events 45510153Sandreas@sandberg.pp.se * though) have taken this lock. This is normally done by 45610153Sandreas@sandberg.pp.se * serviceOne() since new events are typically scheduled as a 45710153Sandreas@sandberg.pp.se * response to an earlier event. 45810153Sandreas@sandberg.pp.se * 45910153Sandreas@sandberg.pp.se * This lock is intended to be used to temporarily steal an event 46010153Sandreas@sandberg.pp.se * queue to support inter-thread communication when some 46110153Sandreas@sandberg.pp.se * deterministic timing can be sacrificed for speed. For example, 46210153Sandreas@sandberg.pp.se * the KVM CPU can use this support to access devices running in a 46310153Sandreas@sandberg.pp.se * different thread. 46410153Sandreas@sandberg.pp.se * 46510153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 46610153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 46710153Sandreas@sandberg.pp.se * @see EventQueue::lock() 46810153Sandreas@sandberg.pp.se * @see EventQueue::unlock() 46910153Sandreas@sandberg.pp.se */ 47010153Sandreas@sandberg.pp.se std::mutex service_mutex; 47110153Sandreas@sandberg.pp.se 4729983Sstever@gmail.com //! Insert / remove event from the queue. Should only be called 4739983Sstever@gmail.com //! by thread operating this queue. 4742SN/A void insert(Event *event); 4752SN/A void remove(Event *event); 4762SN/A 4779983Sstever@gmail.com //! 
Function for adding events to the async queue. The added events 4789983Sstever@gmail.com //! are added to main event queue later. Threads, other than the 4799983Sstever@gmail.com //! owning thread, should call this function instead of insert(). 4809983Sstever@gmail.com void asyncInsert(Event *event); 4819983Sstever@gmail.com 4827063Snate@binkert.org EventQueue(const EventQueue &); 4837063Snate@binkert.org 4842SN/A public: 48510153Sandreas@sandberg.pp.se#ifndef SWIG 48610153Sandreas@sandberg.pp.se /** 48710153Sandreas@sandberg.pp.se * Temporarily migrate execution to a different event queue. 48810153Sandreas@sandberg.pp.se * 48910153Sandreas@sandberg.pp.se * An instance of this class temporarily migrates execution to a 49010153Sandreas@sandberg.pp.se * different event queue by releasing the current queue, locking 49110153Sandreas@sandberg.pp.se * the new queue, and updating curEventQueue(). This can, for 49210153Sandreas@sandberg.pp.se * example, be useful when performing IO across thread event 49310153Sandreas@sandberg.pp.se * queues when timing is not crucial (e.g., during fast 49410153Sandreas@sandberg.pp.se * forwarding). 
49510153Sandreas@sandberg.pp.se */ 49610153Sandreas@sandberg.pp.se class ScopedMigration 49710153Sandreas@sandberg.pp.se { 49810153Sandreas@sandberg.pp.se public: 49910153Sandreas@sandberg.pp.se ScopedMigration(EventQueue *_new_eq) 50010153Sandreas@sandberg.pp.se : new_eq(*_new_eq), old_eq(*curEventQueue()) 50110153Sandreas@sandberg.pp.se { 50210153Sandreas@sandberg.pp.se old_eq.unlock(); 50310153Sandreas@sandberg.pp.se new_eq.lock(); 50410153Sandreas@sandberg.pp.se curEventQueue(&new_eq); 50510153Sandreas@sandberg.pp.se } 50610153Sandreas@sandberg.pp.se 50710153Sandreas@sandberg.pp.se ~ScopedMigration() 50810153Sandreas@sandberg.pp.se { 50910153Sandreas@sandberg.pp.se new_eq.unlock(); 51010153Sandreas@sandberg.pp.se old_eq.lock(); 51110153Sandreas@sandberg.pp.se curEventQueue(&old_eq); 51210153Sandreas@sandberg.pp.se } 51310153Sandreas@sandberg.pp.se 51410153Sandreas@sandberg.pp.se private: 51510153Sandreas@sandberg.pp.se EventQueue &new_eq; 51610153Sandreas@sandberg.pp.se EventQueue &old_eq; 51710153Sandreas@sandberg.pp.se }; 51810153Sandreas@sandberg.pp.se 51910153Sandreas@sandberg.pp.se /** 52010153Sandreas@sandberg.pp.se * Temporarily release the event queue service lock. 52110153Sandreas@sandberg.pp.se * 52210153Sandreas@sandberg.pp.se * There are cases where it is desirable to temporarily release 52310153Sandreas@sandberg.pp.se * the event queue lock to prevent deadlocks. For example, when 52410153Sandreas@sandberg.pp.se * waiting on the global barrier, we need to release the lock to 52510153Sandreas@sandberg.pp.se * prevent deadlocks from happening when another thread tries to 52610153Sandreas@sandberg.pp.se * temporarily take over the event queue waiting on the barrier. 
52710153Sandreas@sandberg.pp.se */ 52810153Sandreas@sandberg.pp.se class ScopedRelease 52910153Sandreas@sandberg.pp.se { 53010153Sandreas@sandberg.pp.se public: 53110153Sandreas@sandberg.pp.se ScopedRelease(EventQueue *_eq) 53210153Sandreas@sandberg.pp.se : eq(*_eq) 53310153Sandreas@sandberg.pp.se { 53410153Sandreas@sandberg.pp.se eq.unlock(); 53510153Sandreas@sandberg.pp.se } 53610153Sandreas@sandberg.pp.se 53710153Sandreas@sandberg.pp.se ~ScopedRelease() 53810153Sandreas@sandberg.pp.se { 53910153Sandreas@sandberg.pp.se eq.lock(); 54010153Sandreas@sandberg.pp.se } 54110153Sandreas@sandberg.pp.se 54210153Sandreas@sandberg.pp.se private: 54310153Sandreas@sandberg.pp.se EventQueue &eq; 54410153Sandreas@sandberg.pp.se }; 54510153Sandreas@sandberg.pp.se#endif 54610153Sandreas@sandberg.pp.se 5477063Snate@binkert.org EventQueue(const std::string &n); 5482SN/A 549512SN/A virtual const std::string name() const { return objName; } 5509983Sstever@gmail.com void name(const std::string &st) { objName = st; } 551265SN/A 5529983Sstever@gmail.com //! Schedule the given event on this queue. Safe to call from any 5539983Sstever@gmail.com //! thread. 5549983Sstever@gmail.com void schedule(Event *event, Tick when, bool global = false); 5559983Sstever@gmail.com 5569983Sstever@gmail.com //! Deschedule the specified event. Should be called only from the 5579983Sstever@gmail.com //! owning thread. 5585738Snate@binkert.org void deschedule(Event *event); 5599983Sstever@gmail.com 5609983Sstever@gmail.com //! Reschedule the specified event. Should be called only from 5619983Sstever@gmail.com //! the owning thread. 
5625738Snate@binkert.org void reschedule(Event *event, Tick when, bool always = false); 5632SN/A 5645501Snate@binkert.org Tick nextTick() const { return head->when(); } 5659356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { _curTick = newVal; } 5669356Snilay@cs.wisc.edu Tick getCurTick() { return _curTick; } 56710991Stimothy.jones@cl.cam.ac.uk Event *getHead() const { return head; } 5689356Snilay@cs.wisc.edu 5692667Sstever@eecs.umich.edu Event *serviceOne(); 5702SN/A 5712SN/A // process all events up to the given timestamp. we inline a 5722SN/A // quick test to see if there are any events to process; if so, 5732SN/A // call the internal out-of-line version to process them all. 5745501Snate@binkert.org void 5755501Snate@binkert.org serviceEvents(Tick when) 5765501Snate@binkert.org { 5772SN/A while (!empty()) { 5782SN/A if (nextTick() > when) 5792SN/A break; 5802SN/A 5811634SN/A /** 5821634SN/A * @todo this assert is a good bug catcher. I need to 5831634SN/A * make it true again. 5841634SN/A */ 5851634SN/A //assert(head->when() >= when && "event scheduled in the past"); 5862SN/A serviceOne(); 5872SN/A } 5889356Snilay@cs.wisc.edu 5899356Snilay@cs.wisc.edu setCurTick(when); 5902SN/A } 5912SN/A 5922SN/A // return true if no events are queued 5935501Snate@binkert.org bool empty() const { return head == NULL; } 5942SN/A 5955501Snate@binkert.org void dump() const; 5962SN/A 5975502Snate@binkert.org bool debugVerify() const; 5985502Snate@binkert.org 5999983Sstever@gmail.com //! Function for moving events from the async_queue to the main queue. 6009983Sstever@gmail.com void handleAsyncInsertions(); 6019983Sstever@gmail.com 6028648Snilay@cs.wisc.edu /** 60310476Sandreas.hansson@arm.com * Function to signal that the event loop should be woken up because 60410476Sandreas.hansson@arm.com * an event has been scheduled by an agent outside the gem5 event 60510476Sandreas.hansson@arm.com * loop(s) whose event insertion may not have been noticed by gem5. 
60610476Sandreas.hansson@arm.com * This function isn't needed by the usual gem5 event loop but may 60710476Sandreas.hansson@arm.com * be necessary in derived EventQueues which host gem5 onto other 60810476Sandreas.hansson@arm.com * schedulers. 60910476Sandreas.hansson@arm.com * 61010476Sandreas.hansson@arm.com * @param when Time of a delayed wakeup (if known). This parameter 61110476Sandreas.hansson@arm.com * can be used by an implementation to schedule a wakeup in the 61210476Sandreas.hansson@arm.com * future if it is sure it will remain active until then. 61310476Sandreas.hansson@arm.com * Or it can be ignored and the event queue can be woken up now. 61410476Sandreas.hansson@arm.com */ 61510476Sandreas.hansson@arm.com virtual void wakeup(Tick when = (Tick)-1) { } 61610476Sandreas.hansson@arm.com 61710476Sandreas.hansson@arm.com /** 6188648Snilay@cs.wisc.edu * function for replacing the head of the event queue, so that a 6198648Snilay@cs.wisc.edu * different set of events can run without disturbing events that have 6208648Snilay@cs.wisc.edu * already been scheduled. Already scheduled events can be processed 6218648Snilay@cs.wisc.edu * by replacing the original head back. 6228648Snilay@cs.wisc.edu * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR. 6238648Snilay@cs.wisc.edu * NOT RECOMMENDED FOR USE. 6248648Snilay@cs.wisc.edu */ 6258648Snilay@cs.wisc.edu Event* replaceHead(Event* s); 6268648Snilay@cs.wisc.edu 62710153Sandreas@sandberg.pp.se /**@{*/ 62810153Sandreas@sandberg.pp.se /** 62910153Sandreas@sandberg.pp.se * Provide an interface for locking/unlocking the event queue. 63010153Sandreas@sandberg.pp.se * 63110153Sandreas@sandberg.pp.se * @warn Do NOT use these methods directly unless you really know 63210153Sandreas@sandberg.pp.se * what you are doing. Incorrect use can easily lead to simulator 63310153Sandreas@sandberg.pp.se * deadlocks. 63410153Sandreas@sandberg.pp.se * 63510153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 
63610153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 63710153Sandreas@sandberg.pp.se * @see EventQueue 63810153Sandreas@sandberg.pp.se */ 63910153Sandreas@sandberg.pp.se void lock() { service_mutex.lock(); } 64010153Sandreas@sandberg.pp.se void unlock() { service_mutex.unlock(); } 64110153Sandreas@sandberg.pp.se /**@}*/ 64210153Sandreas@sandberg.pp.se 6435605Snate@binkert.org#ifndef SWIG 64410905Sandreas.sandberg@arm.com void serialize(CheckpointOut &cp) const M5_ATTR_OVERRIDE; 64510905Sandreas.sandberg@arm.com void unserialize(CheckpointIn &cp) M5_ATTR_OVERRIDE; 6465605Snate@binkert.org#endif 64710476Sandreas.hansson@arm.com 64810906Sandreas.sandberg@arm.com /** 64910906Sandreas.sandberg@arm.com * Reschedule an event after a checkpoint. 65010906Sandreas.sandberg@arm.com * 65110906Sandreas.sandberg@arm.com * Since events don't know which event queue they belong to, 65210906Sandreas.sandberg@arm.com * parent objects need to reschedule events themselves. This 65310906Sandreas.sandberg@arm.com * method conditionally schedules an event that has the Scheduled 65410906Sandreas.sandberg@arm.com * flag set. It should be called by parent objects after 65510906Sandreas.sandberg@arm.com * unserializing an object. 65610906Sandreas.sandberg@arm.com * 65710906Sandreas.sandberg@arm.com * @warn Only use this method after unserializing an Event. 
65810906Sandreas.sandberg@arm.com */ 65910906Sandreas.sandberg@arm.com void checkpointReschedule(Event *event); 66010906Sandreas.sandberg@arm.com 66110476Sandreas.hansson@arm.com virtual ~EventQueue() { } 6622SN/A}; 6632SN/A 6649554Sandreas.hansson@arm.comvoid dumpMainQueue(); 6659554Sandreas.hansson@arm.com 6665605Snate@binkert.org#ifndef SWIG 6675605Snate@binkert.orgclass EventManager 6685605Snate@binkert.org{ 6695605Snate@binkert.org protected: 6705605Snate@binkert.org /** A pointer to this object's event queue */ 6715605Snate@binkert.org EventQueue *eventq; 6722SN/A 6735605Snate@binkert.org public: 6749099Sandreas.hansson@arm.com EventManager(EventManager &em) : eventq(em.eventq) {} 6759159Sandreas.hansson@arm.com EventManager(EventManager *em) : eventq(em->eventq) {} 6765605Snate@binkert.org EventManager(EventQueue *eq) : eventq(eq) {} 6772SN/A 6785605Snate@binkert.org EventQueue * 6799099Sandreas.hansson@arm.com eventQueue() const 6807060Snate@binkert.org { 6817060Snate@binkert.org return eventq; 6827060Snate@binkert.org } 6837060Snate@binkert.org 6845605Snate@binkert.org void 6855605Snate@binkert.org schedule(Event &event, Tick when) 6865605Snate@binkert.org { 6875605Snate@binkert.org eventq->schedule(&event, when); 6885605Snate@binkert.org } 6895605Snate@binkert.org 6905605Snate@binkert.org void 6915605Snate@binkert.org deschedule(Event &event) 6925605Snate@binkert.org { 6935605Snate@binkert.org eventq->deschedule(&event); 6945605Snate@binkert.org } 6955605Snate@binkert.org 6965605Snate@binkert.org void 6975605Snate@binkert.org reschedule(Event &event, Tick when, bool always = false) 6985605Snate@binkert.org { 6995605Snate@binkert.org eventq->reschedule(&event, when, always); 7005605Snate@binkert.org } 7015605Snate@binkert.org 7025605Snate@binkert.org void 7035605Snate@binkert.org schedule(Event *event, Tick when) 7045605Snate@binkert.org { 7055605Snate@binkert.org eventq->schedule(event, when); 7065605Snate@binkert.org } 7075605Snate@binkert.org 
7085605Snate@binkert.org void 7095605Snate@binkert.org deschedule(Event *event) 7105605Snate@binkert.org { 7115605Snate@binkert.org eventq->deschedule(event); 7125605Snate@binkert.org } 7135605Snate@binkert.org 7145605Snate@binkert.org void 7155605Snate@binkert.org reschedule(Event *event, Tick when, bool always = false) 7165605Snate@binkert.org { 7175605Snate@binkert.org eventq->reschedule(event, when, always); 7185605Snate@binkert.org } 7199356Snilay@cs.wisc.edu 72010476Sandreas.hansson@arm.com void wakeupEventQueue(Tick when = (Tick)-1) 72110476Sandreas.hansson@arm.com { 72210476Sandreas.hansson@arm.com eventq->wakeup(when); 72310476Sandreas.hansson@arm.com } 72410476Sandreas.hansson@arm.com 7259356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { eventq->setCurTick(newVal); } 7265605Snate@binkert.org}; 7275605Snate@binkert.org 7287005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7297005Snate@binkert.orgvoid 7307005Snate@binkert.orgDelayFunction(EventQueue *eventq, Tick when, T *object) 7315502Snate@binkert.org{ 7327005Snate@binkert.org class DelayEvent : public Event 7337005Snate@binkert.org { 7347005Snate@binkert.org private: 7357005Snate@binkert.org T *object; 7367005Snate@binkert.org 7377005Snate@binkert.org public: 7387005Snate@binkert.org DelayEvent(T *o) 7398581Ssteve.reinhardt@amd.com : Event(Default_Pri, AutoDelete), object(o) 7408581Ssteve.reinhardt@amd.com { } 7417005Snate@binkert.org void process() { (object->*F)(); } 7427005Snate@binkert.org const char *description() const { return "delay"; } 7437005Snate@binkert.org }; 7447005Snate@binkert.org 7457005Snate@binkert.org eventq->schedule(new DelayEvent(object), when); 7465502Snate@binkert.org} 7475502Snate@binkert.org 7487005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7497005Snate@binkert.orgclass EventWrapper : public Event 7505502Snate@binkert.org{ 7517005Snate@binkert.org private: 7527005Snate@binkert.org T *object; 7535502Snate@binkert.org 7547005Snate@binkert.org public: 
7557005Snate@binkert.org EventWrapper(T *obj, bool del = false, Priority p = Default_Pri) 7567005Snate@binkert.org : Event(p), object(obj) 7577005Snate@binkert.org { 7587005Snate@binkert.org if (del) 7597005Snate@binkert.org setFlags(AutoDelete); 7607005Snate@binkert.org } 7615502Snate@binkert.org 7627066Snate@binkert.org EventWrapper(T &obj, bool del = false, Priority p = Default_Pri) 7637066Snate@binkert.org : Event(p), object(&obj) 7647066Snate@binkert.org { 7657066Snate@binkert.org if (del) 7667066Snate@binkert.org setFlags(AutoDelete); 7677066Snate@binkert.org } 7687066Snate@binkert.org 7697005Snate@binkert.org void process() { (object->*F)(); } 7705502Snate@binkert.org 7717005Snate@binkert.org const std::string 7727005Snate@binkert.org name() const 7737005Snate@binkert.org { 7747005Snate@binkert.org return object->name() + ".wrapped_event"; 7757005Snate@binkert.org } 7767005Snate@binkert.org 7777005Snate@binkert.org const char *description() const { return "EventWrapped"; } 7787005Snate@binkert.org}; 7795605Snate@binkert.org#endif 7802SN/A 7811354SN/A#endif // __SIM_EVENTQ_HH__ 782