/* eventq.hh revision 11320
 *
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
292665Ssaidi@eecs.umich.edu * 302665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 312665Ssaidi@eecs.umich.edu * Nathan Binkert 322SN/A */ 332SN/A 342SN/A/* @file 352SN/A * EventQueue interfaces 362SN/A */ 372SN/A 381354SN/A#ifndef __SIM_EVENTQ_HH__ 391354SN/A#define __SIM_EVENTQ_HH__ 402SN/A 412SN/A#include <algorithm> 425501Snate@binkert.org#include <cassert> 435546Snate@binkert.org#include <climits> 447004Snate@binkert.org#include <iosfwd> 4510412Sandreas.hansson@arm.com#include <memory> 469983Sstever@gmail.com#include <mutex> 472SN/A#include <string> 482SN/A 495769Snate@binkert.org#include "base/flags.hh" 502361SN/A#include "base/misc.hh" 516216Snate@binkert.org#include "base/types.hh" 528232Snate@binkert.org#include "debug/Event.hh" 5356SN/A#include "sim/serialize.hh" 542SN/A 555543Ssaidi@eecs.umich.educlass EventQueue; // forward declaration 569983Sstever@gmail.comclass BaseGlobalEvent; 572SN/A 589983Sstever@gmail.com//! Simulation Quantum for multiple eventq simulation. 599983Sstever@gmail.com//! The quantum value is the period length after which the queues 609983Sstever@gmail.com//! synchronize themselves with each other. This means that any 619983Sstever@gmail.com//! event to scheduled on Queue A which is generated by an event on 629983Sstever@gmail.com//! Queue B should be at least simQuantum ticks away in future. 639983Sstever@gmail.comextern Tick simQuantum; 641354SN/A 659983Sstever@gmail.com//! Current number of allocated main event queues. 669983Sstever@gmail.comextern uint32_t numMainEventQueues; 679983Sstever@gmail.com 689983Sstever@gmail.com//! Array for main event queues. 699983Sstever@gmail.comextern std::vector<EventQueue *> mainEventQueue; 709983Sstever@gmail.com 719983Sstever@gmail.com#ifndef SWIG 729983Sstever@gmail.com//! The current event queue for the running thread. Access to this queue 739983Sstever@gmail.com//! does not require any locking from the thread. 
749983Sstever@gmail.com 759983Sstever@gmail.comextern __thread EventQueue *_curEventQueue; 769983Sstever@gmail.com 779983Sstever@gmail.com#endif 789983Sstever@gmail.com 799983Sstever@gmail.com//! Current mode of execution: parallel / serial 809983Sstever@gmail.comextern bool inParallelMode; 819983Sstever@gmail.com 829983Sstever@gmail.com//! Function for returning eventq queue for the provided 839983Sstever@gmail.com//! index. The function allocates a new queue in case one 849983Sstever@gmail.com//! does not exist for the index, provided that the index 859983Sstever@gmail.com//! is with in bounds. 869983Sstever@gmail.comEventQueue *getEventQueue(uint32_t index); 879983Sstever@gmail.com 889983Sstever@gmail.cominline EventQueue *curEventQueue() { return _curEventQueue; } 899983Sstever@gmail.cominline void curEventQueue(EventQueue *q) { _curEventQueue = q; } 909983Sstever@gmail.com 919983Sstever@gmail.com/** 929983Sstever@gmail.com * Common base class for Event and GlobalEvent, so they can share flag 939983Sstever@gmail.com * and priority definitions and accessor functions. This class should 949983Sstever@gmail.com * not be used directly. 952SN/A */ 969983Sstever@gmail.comclass EventBase 972SN/A{ 9811320Ssteve.reinhardt@amd.com protected: 998902Sandreas.hansson@arm.com typedef unsigned short FlagsType; 1005769Snate@binkert.org typedef ::Flags<FlagsType> Flags; 1015769Snate@binkert.org 1027059Snate@binkert.org static const FlagsType PublicRead = 0x003f; // public readable flags 1037059Snate@binkert.org static const FlagsType PublicWrite = 0x001d; // public writable flags 1047059Snate@binkert.org static const FlagsType Squashed = 0x0001; // has been squashed 1057059Snate@binkert.org static const FlagsType Scheduled = 0x0002; // has been scheduled 1067059Snate@binkert.org static const FlagsType AutoDelete = 0x0004; // delete after dispatch 10711072Sandreas.sandberg@arm.com /** 10811072Sandreas.sandberg@arm.com * This used to be AutoSerialize. 
This value can't be reused 10911072Sandreas.sandberg@arm.com * without changing the checkpoint version since the flag field 11011072Sandreas.sandberg@arm.com * gets serialized. 11111072Sandreas.sandberg@arm.com */ 11211072Sandreas.sandberg@arm.com static const FlagsType Reserved0 = 0x0008; 1137059Snate@binkert.org static const FlagsType IsExitEvent = 0x0010; // special exit event 1147059Snate@binkert.org static const FlagsType IsMainQueue = 0x0020; // on main event queue 1157059Snate@binkert.org static const FlagsType Initialized = 0x7a40; // somewhat random bits 1167059Snate@binkert.org static const FlagsType InitMask = 0xffc0; // mask for init bits 1177059Snate@binkert.org 1187058Snate@binkert.org public: 1197058Snate@binkert.org typedef int8_t Priority; 1207058Snate@binkert.org 121396SN/A /// Event priorities, to provide tie-breakers for events scheduled 122396SN/A /// at the same cycle. Most events are scheduled at the default 123396SN/A /// priority; these values are used to control events that need to 124396SN/A /// be ordered within a cycle. 1255501Snate@binkert.org 1267058Snate@binkert.org /// Minimum priority 1277058Snate@binkert.org static const Priority Minimum_Pri = SCHAR_MIN; 1283329Sstever@eecs.umich.edu 1297058Snate@binkert.org /// If we enable tracing on a particular cycle, do that as the 1307058Snate@binkert.org /// very first thing so we don't miss any of the events on 1317058Snate@binkert.org /// that cycle (even if we enter the debugger). 1329979Satgutier@umich.edu static const Priority Debug_Enable_Pri = -101; 133396SN/A 1347058Snate@binkert.org /// Breakpoints should happen before anything else (except 1357058Snate@binkert.org /// enabling trace output), so we don't miss any action when 1367058Snate@binkert.org /// debugging. 
1377058Snate@binkert.org static const Priority Debug_Break_Pri = -100; 1383329Sstever@eecs.umich.edu 1397058Snate@binkert.org /// CPU switches schedule the new CPU's tick event for the 1407058Snate@binkert.org /// same cycle (after unscheduling the old CPU's tick event). 1417058Snate@binkert.org /// The switch needs to come before any tick events to make 1427058Snate@binkert.org /// sure we don't tick both CPUs in the same cycle. 1437058Snate@binkert.org static const Priority CPU_Switch_Pri = -31; 144396SN/A 1457058Snate@binkert.org /// For some reason "delayed" inter-cluster writebacks are 1467058Snate@binkert.org /// scheduled before regular writebacks (which have default 1477058Snate@binkert.org /// priority). Steve? 1487058Snate@binkert.org static const Priority Delayed_Writeback_Pri = -1; 149396SN/A 1507058Snate@binkert.org /// Default is zero for historical reasons. 1517058Snate@binkert.org static const Priority Default_Pri = 0; 152396SN/A 15310249Sstephan.diestelhorst@arm.com /// DVFS update event leads to stats dump therefore given a lower priority 15410249Sstephan.diestelhorst@arm.com /// to ensure all relevant states have been updated 15510249Sstephan.diestelhorst@arm.com static const Priority DVFS_Update_Pri = 31; 15610249Sstephan.diestelhorst@arm.com 1577058Snate@binkert.org /// Serailization needs to occur before tick events also, so 1587058Snate@binkert.org /// that a serialize/unserialize is identical to an on-line 1597058Snate@binkert.org /// CPU switch. 1607058Snate@binkert.org static const Priority Serialize_Pri = 32; 161396SN/A 1627058Snate@binkert.org /// CPU ticks must come after other associated CPU events 1637058Snate@binkert.org /// (such as writebacks). 1647058Snate@binkert.org static const Priority CPU_Tick_Pri = 50; 165396SN/A 1667058Snate@binkert.org /// Statistics events (dump, reset, etc.) come after 1677058Snate@binkert.org /// everything else, but before exit. 
1687058Snate@binkert.org static const Priority Stat_Event_Pri = 90; 1694075Sbinkertn@umich.edu 1707058Snate@binkert.org /// Progress events come at the end. 1717058Snate@binkert.org static const Priority Progress_Event_Pri = 95; 1725501Snate@binkert.org 1737058Snate@binkert.org /// If we want to exit on this cycle, it's the very last thing 1747058Snate@binkert.org /// we do. 1757058Snate@binkert.org static const Priority Sim_Exit_Pri = 100; 1767058Snate@binkert.org 1777058Snate@binkert.org /// Maximum priority 1787058Snate@binkert.org static const Priority Maximum_Pri = SCHAR_MAX; 1799983Sstever@gmail.com}; 1809983Sstever@gmail.com 1819983Sstever@gmail.com/* 1829983Sstever@gmail.com * An item on an event queue. The action caused by a given 1839983Sstever@gmail.com * event is specified by deriving a subclass and overriding the 1849983Sstever@gmail.com * process() member function. 1859983Sstever@gmail.com * 1869983Sstever@gmail.com * Caution, the order of members is chosen to maximize data packing. 1879983Sstever@gmail.com */ 1889983Sstever@gmail.comclass Event : public EventBase, public Serializable 1899983Sstever@gmail.com{ 1909983Sstever@gmail.com friend class EventQueue; 1919983Sstever@gmail.com 1929983Sstever@gmail.com private: 1939983Sstever@gmail.com // The event queue is now a linked list of linked lists. The 1949983Sstever@gmail.com // 'nextBin' pointer is to find the bin, where a bin is defined as 1959983Sstever@gmail.com // when+priority. All events in the same bin will be stored in a 1969983Sstever@gmail.com // second linked list (a stack) maintained by the 'nextInBin' 1979983Sstever@gmail.com // pointer. The list will be accessed in LIFO order. The end 1989983Sstever@gmail.com // result is that the insert/removal in 'nextBin' is 1999983Sstever@gmail.com // linear/constant, and the lookup/removal in 'nextInBin' is 2009983Sstever@gmail.com // constant/constant. 
Hopefully this is a significant improvement 2019983Sstever@gmail.com // over the current fully linear insertion. 2029983Sstever@gmail.com Event *nextBin; 2039983Sstever@gmail.com Event *nextInBin; 2049983Sstever@gmail.com 2059983Sstever@gmail.com static Event *insertBefore(Event *event, Event *curr); 2069983Sstever@gmail.com static Event *removeItem(Event *event, Event *last); 2079983Sstever@gmail.com 2089983Sstever@gmail.com Tick _when; //!< timestamp when event should be processed 2099983Sstever@gmail.com Priority _priority; //!< event priority 2109983Sstever@gmail.com Flags flags; 2119983Sstever@gmail.com 2129983Sstever@gmail.com#ifndef NDEBUG 2139983Sstever@gmail.com /// Global counter to generate unique IDs for Event instances 2149983Sstever@gmail.com static Counter instanceCounter; 2159983Sstever@gmail.com 2169983Sstever@gmail.com /// This event's unique ID. We can also use pointer values for 2179983Sstever@gmail.com /// this but they're not consistent across runs making debugging 2189983Sstever@gmail.com /// more difficult. Thus we use a global counter value when 2199983Sstever@gmail.com /// debugging. 
2209983Sstever@gmail.com Counter instance; 2219983Sstever@gmail.com 2229983Sstever@gmail.com /// queue to which this event belongs (though it may or may not be 2239983Sstever@gmail.com /// scheduled on this queue yet) 2249983Sstever@gmail.com EventQueue *queue; 2259983Sstever@gmail.com#endif 2269983Sstever@gmail.com 2279983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2289983Sstever@gmail.com Tick whenCreated; //!< time created 2299983Sstever@gmail.com Tick whenScheduled; //!< time scheduled 2309983Sstever@gmail.com#endif 2319983Sstever@gmail.com 2329983Sstever@gmail.com void 2339983Sstever@gmail.com setWhen(Tick when, EventQueue *q) 2349983Sstever@gmail.com { 2359983Sstever@gmail.com _when = when; 2369983Sstever@gmail.com#ifndef NDEBUG 2379983Sstever@gmail.com queue = q; 2389983Sstever@gmail.com#endif 2399983Sstever@gmail.com#ifdef EVENTQ_DEBUG 2409983Sstever@gmail.com whenScheduled = curTick(); 2419983Sstever@gmail.com#endif 2429983Sstever@gmail.com } 2439983Sstever@gmail.com 2449983Sstever@gmail.com bool 2459983Sstever@gmail.com initialized() const 2469983Sstever@gmail.com { 24710673SAndreas.Sandberg@ARM.com return (flags & InitMask) == Initialized; 2489983Sstever@gmail.com } 2499983Sstever@gmail.com 2509983Sstever@gmail.com protected: 2519983Sstever@gmail.com /// Accessor for flags. 2529983Sstever@gmail.com Flags 2539983Sstever@gmail.com getFlags() const 2549983Sstever@gmail.com { 2559983Sstever@gmail.com return flags & PublicRead; 2569983Sstever@gmail.com } 2579983Sstever@gmail.com 2589983Sstever@gmail.com bool 2599983Sstever@gmail.com isFlagSet(Flags _flags) const 2609983Sstever@gmail.com { 2619983Sstever@gmail.com assert(_flags.noneSet(~PublicRead)); 2629983Sstever@gmail.com return flags.isSet(_flags); 2639983Sstever@gmail.com } 2649983Sstever@gmail.com 2659983Sstever@gmail.com /// Accessor for flags. 
2669983Sstever@gmail.com void 2679983Sstever@gmail.com setFlags(Flags _flags) 2689983Sstever@gmail.com { 2699983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2709983Sstever@gmail.com flags.set(_flags); 2719983Sstever@gmail.com } 2729983Sstever@gmail.com 2739983Sstever@gmail.com void 2749983Sstever@gmail.com clearFlags(Flags _flags) 2759983Sstever@gmail.com { 2769983Sstever@gmail.com assert(_flags.noneSet(~PublicWrite)); 2779983Sstever@gmail.com flags.clear(_flags); 2789983Sstever@gmail.com } 2799983Sstever@gmail.com 2809983Sstever@gmail.com void 2819983Sstever@gmail.com clearFlags() 2829983Sstever@gmail.com { 2839983Sstever@gmail.com flags.clear(PublicWrite); 2849983Sstever@gmail.com } 2859983Sstever@gmail.com 2869983Sstever@gmail.com // This function isn't really useful if TRACING_ON is not defined 2879983Sstever@gmail.com virtual void trace(const char *action); //!< trace event activity 2889983Sstever@gmail.com 2899983Sstever@gmail.com public: 290396SN/A 2912SN/A /* 2922SN/A * Event constructor 2932SN/A * @param queue that the event gets scheduled on 2942SN/A */ 2958581Ssteve.reinhardt@amd.com Event(Priority p = Default_Pri, Flags f = 0) 29610360Sandreas.hansson@arm.com : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p), 2978581Ssteve.reinhardt@amd.com flags(Initialized | f) 298224SN/A { 2998581Ssteve.reinhardt@amd.com assert(f.noneSet(~PublicWrite)); 3004016Sstever@eecs.umich.edu#ifndef NDEBUG 3015501Snate@binkert.org instance = ++instanceCounter; 3025605Snate@binkert.org queue = NULL; 3035501Snate@binkert.org#endif 3045501Snate@binkert.org#ifdef EVENTQ_DEBUG 3057823Ssteve.reinhardt@amd.com whenCreated = curTick(); 3065501Snate@binkert.org whenScheduled = 0; 3074016Sstever@eecs.umich.edu#endif 308224SN/A } 309224SN/A 3105768Snate@binkert.org virtual ~Event(); 3115768Snate@binkert.org virtual const std::string name() const; 312265SN/A 3135501Snate@binkert.org /// Return a C string describing the event. 
This string should 3145501Snate@binkert.org /// *not* be dynamically allocated; just a const char array 3155501Snate@binkert.org /// describing the event class. 3165501Snate@binkert.org virtual const char *description() const; 3175501Snate@binkert.org 3185501Snate@binkert.org /// Dump the current event data 3195501Snate@binkert.org void dump() const; 3205501Snate@binkert.org 3215501Snate@binkert.org public: 3225501Snate@binkert.org /* 3235501Snate@binkert.org * This member function is invoked when the event is processed 3245501Snate@binkert.org * (occurs). There is no default implementation; each subclass 3255501Snate@binkert.org * must provide its own implementation. The event is not 3265501Snate@binkert.org * automatically deleted after it is processed (to allow for 3275501Snate@binkert.org * statically allocated event objects). 3285501Snate@binkert.org * 3295501Snate@binkert.org * If the AutoDestroy flag is set, the object is deleted once it 3305501Snate@binkert.org * is processed. 3315501Snate@binkert.org */ 3325501Snate@binkert.org virtual void process() = 0; 3335501Snate@binkert.org 3342SN/A /// Determine if the current event is scheduled 3355769Snate@binkert.org bool scheduled() const { return flags.isSet(Scheduled); } 3362SN/A 3372SN/A /// Squash the current event 3385769Snate@binkert.org void squash() { flags.set(Squashed); } 3392SN/A 3402SN/A /// Check whether the event is squashed 3415769Snate@binkert.org bool squashed() const { return flags.isSet(Squashed); } 3422SN/A 3432667Sstever@eecs.umich.edu /// See if this is a SimExitEvent (without resorting to RTTI) 3445769Snate@binkert.org bool isExitEvent() const { return flags.isSet(IsExitEvent); } 3452667Sstever@eecs.umich.edu 34610992Stimothy.jones@cl.cam.ac.uk /// Check whether this event will auto-delete 34710992Stimothy.jones@cl.cam.ac.uk bool isAutoDelete() const { return flags.isSet(AutoDelete); } 34810992Stimothy.jones@cl.cam.ac.uk 3492SN/A /// Get the time that the event is scheduled 3502SN/A Tick 
when() const { return _when; } 3512SN/A 3522SN/A /// Get the event priority 3537058Snate@binkert.org Priority priority() const { return _priority; } 3542SN/A 3559983Sstever@gmail.com //! If this is part of a GlobalEvent, return the pointer to the 3569983Sstever@gmail.com //! Global Event. By default, there is no GlobalEvent, so return 3579983Sstever@gmail.com //! NULL. (Overridden in GlobalEvent::BarrierEvent.) 3589983Sstever@gmail.com virtual BaseGlobalEvent *globalEvent() { return NULL; } 3599983Sstever@gmail.com 3605605Snate@binkert.org#ifndef SWIG 36111168Sandreas.hansson@arm.com void serialize(CheckpointOut &cp) const override; 36211168Sandreas.hansson@arm.com void unserialize(CheckpointIn &cp) override; 3635605Snate@binkert.org#endif 364571SN/A}; 365571SN/A 3667005Snate@binkert.org#ifndef SWIG 3677005Snate@binkert.orginline bool 3687005Snate@binkert.orgoperator<(const Event &l, const Event &r) 3697005Snate@binkert.org{ 3707005Snate@binkert.org return l.when() < r.when() || 3717005Snate@binkert.org (l.when() == r.when() && l.priority() < r.priority()); 3727005Snate@binkert.org} 3737005Snate@binkert.org 3747005Snate@binkert.orginline bool 3757005Snate@binkert.orgoperator>(const Event &l, const Event &r) 3767005Snate@binkert.org{ 3777005Snate@binkert.org return l.when() > r.when() || 3787005Snate@binkert.org (l.when() == r.when() && l.priority() > r.priority()); 3797005Snate@binkert.org} 3807005Snate@binkert.org 3817005Snate@binkert.orginline bool 3827005Snate@binkert.orgoperator<=(const Event &l, const Event &r) 3837005Snate@binkert.org{ 3847005Snate@binkert.org return l.when() < r.when() || 3857005Snate@binkert.org (l.when() == r.when() && l.priority() <= r.priority()); 3867005Snate@binkert.org} 3877005Snate@binkert.orginline bool 3887005Snate@binkert.orgoperator>=(const Event &l, const Event &r) 3897005Snate@binkert.org{ 3907005Snate@binkert.org return l.when() > r.when() || 3917005Snate@binkert.org (l.when() == r.when() && l.priority() >= r.priority()); 
3927005Snate@binkert.org} 3937005Snate@binkert.org 3947005Snate@binkert.orginline bool 3957005Snate@binkert.orgoperator==(const Event &l, const Event &r) 3967005Snate@binkert.org{ 3977005Snate@binkert.org return l.when() == r.when() && l.priority() == r.priority(); 3987005Snate@binkert.org} 3997005Snate@binkert.org 4007005Snate@binkert.orginline bool 4017005Snate@binkert.orgoperator!=(const Event &l, const Event &r) 4027005Snate@binkert.org{ 4037005Snate@binkert.org return l.when() != r.when() || l.priority() != r.priority(); 4047005Snate@binkert.org} 4057005Snate@binkert.org#endif 4067005Snate@binkert.org 40710153Sandreas@sandberg.pp.se/** 4082SN/A * Queue of events sorted in time order 40910153Sandreas@sandberg.pp.se * 41010153Sandreas@sandberg.pp.se * Events are scheduled (inserted into the event queue) using the 41110153Sandreas@sandberg.pp.se * schedule() method. This method either inserts a <i>synchronous</i> 41210153Sandreas@sandberg.pp.se * or <i>asynchronous</i> event. 41310153Sandreas@sandberg.pp.se * 41410153Sandreas@sandberg.pp.se * Synchronous events are scheduled using schedule() method with the 41510153Sandreas@sandberg.pp.se * argument 'global' set to false (default). This should only be done 41610153Sandreas@sandberg.pp.se * from a thread holding the event queue lock 41710153Sandreas@sandberg.pp.se * (EventQueue::service_mutex). The lock is always held when an event 41810153Sandreas@sandberg.pp.se * handler is called, it can therefore always insert events into its 41910153Sandreas@sandberg.pp.se * own event queue unless it voluntarily releases the lock. 42010153Sandreas@sandberg.pp.se * 42110153Sandreas@sandberg.pp.se * Events can be scheduled across thread (and event queue borders) by 42210153Sandreas@sandberg.pp.se * either scheduling asynchronous events or taking the target event 42310153Sandreas@sandberg.pp.se * queue's lock. 
However, the lock should <i>never</i> be taken 42410153Sandreas@sandberg.pp.se * directly since this is likely to cause deadlocks. Instead, code 42510153Sandreas@sandberg.pp.se * that needs to schedule events in other event queues should 42610153Sandreas@sandberg.pp.se * temporarily release its own queue and lock the new queue. This 42710153Sandreas@sandberg.pp.se * prevents deadlocks since a single thread never owns more than one 42810153Sandreas@sandberg.pp.se * event queue lock. This functionality is provided by the 42910153Sandreas@sandberg.pp.se * ScopedMigration helper class. Note that temporarily migrating 43010153Sandreas@sandberg.pp.se * between event queues can make the simulation non-deterministic, it 43110153Sandreas@sandberg.pp.se * should therefore be limited to cases where that can be tolerated 43210153Sandreas@sandberg.pp.se * (e.g., handling asynchronous IO or fast-forwarding in KVM). 43310153Sandreas@sandberg.pp.se * 43410153Sandreas@sandberg.pp.se * Asynchronous events can also be scheduled using the normal 43510153Sandreas@sandberg.pp.se * schedule() method with the 'global' parameter set to true. Unlike 43610153Sandreas@sandberg.pp.se * the previous queue migration strategy, this strategy is fully 43710153Sandreas@sandberg.pp.se * deterministic. This causes the event to be inserted in a separate 43810153Sandreas@sandberg.pp.se * queue of asynchronous events (async_queue), which is merged main 43910153Sandreas@sandberg.pp.se * event queue at the end of each simulation quantum (by calling the 44010153Sandreas@sandberg.pp.se * handleAsyncInsertions() method). Note that this implies that such 44110153Sandreas@sandberg.pp.se * events must happen at least one simulation quantum into the future, 44210153Sandreas@sandberg.pp.se * otherwise they risk being scheduled in the past by 44310153Sandreas@sandberg.pp.se * handleAsyncInsertions(). 
4442SN/A */ 44511072Sandreas.sandberg@arm.comclass EventQueue 4462SN/A{ 4475605Snate@binkert.org private: 448265SN/A std::string objName; 4492SN/A Event *head; 4509356Snilay@cs.wisc.edu Tick _curTick; 4512SN/A 4529983Sstever@gmail.com //! Mutex to protect async queue. 45310412Sandreas.hansson@arm.com std::mutex async_queue_mutex; 4549983Sstever@gmail.com 4559983Sstever@gmail.com //! List of events added by other threads to this event queue. 4569983Sstever@gmail.com std::list<Event*> async_queue; 4579983Sstever@gmail.com 45810153Sandreas@sandberg.pp.se /** 45910153Sandreas@sandberg.pp.se * Lock protecting event handling. 46010153Sandreas@sandberg.pp.se * 46110153Sandreas@sandberg.pp.se * This lock is always taken when servicing events. It is assumed 46210153Sandreas@sandberg.pp.se * that the thread scheduling new events (not asynchronous events 46310153Sandreas@sandberg.pp.se * though) have taken this lock. This is normally done by 46410153Sandreas@sandberg.pp.se * serviceOne() since new events are typically scheduled as a 46510153Sandreas@sandberg.pp.se * response to an earlier event. 46610153Sandreas@sandberg.pp.se * 46710153Sandreas@sandberg.pp.se * This lock is intended to be used to temporarily steal an event 46810153Sandreas@sandberg.pp.se * queue to support inter-thread communication when some 46910153Sandreas@sandberg.pp.se * deterministic timing can be sacrificed for speed. For example, 47010153Sandreas@sandberg.pp.se * the KVM CPU can use this support to access devices running in a 47110153Sandreas@sandberg.pp.se * different thread. 47210153Sandreas@sandberg.pp.se * 47310153Sandreas@sandberg.pp.se * @see EventQueue::ScopedMigration. 
47410153Sandreas@sandberg.pp.se * @see EventQueue::ScopedRelease 47510153Sandreas@sandberg.pp.se * @see EventQueue::lock() 47610153Sandreas@sandberg.pp.se * @see EventQueue::unlock() 47710153Sandreas@sandberg.pp.se */ 47810153Sandreas@sandberg.pp.se std::mutex service_mutex; 47910153Sandreas@sandberg.pp.se 4809983Sstever@gmail.com //! Insert / remove event from the queue. Should only be called 4819983Sstever@gmail.com //! by thread operating this queue. 4822SN/A void insert(Event *event); 4832SN/A void remove(Event *event); 4842SN/A 4859983Sstever@gmail.com //! Function for adding events to the async queue. The added events 4869983Sstever@gmail.com //! are added to main event queue later. Threads, other than the 4879983Sstever@gmail.com //! owning thread, should call this function instead of insert(). 4889983Sstever@gmail.com void asyncInsert(Event *event); 4899983Sstever@gmail.com 4907063Snate@binkert.org EventQueue(const EventQueue &); 4917063Snate@binkert.org 4922SN/A public: 49310153Sandreas@sandberg.pp.se#ifndef SWIG 49410153Sandreas@sandberg.pp.se /** 49510153Sandreas@sandberg.pp.se * Temporarily migrate execution to a different event queue. 49610153Sandreas@sandberg.pp.se * 49710153Sandreas@sandberg.pp.se * An instance of this class temporarily migrates execution to a 49810153Sandreas@sandberg.pp.se * different event queue by releasing the current queue, locking 49910153Sandreas@sandberg.pp.se * the new queue, and updating curEventQueue(). This can, for 50010153Sandreas@sandberg.pp.se * example, be useful when performing IO across thread event 50110153Sandreas@sandberg.pp.se * queues when timing is not crucial (e.g., during fast 50210153Sandreas@sandberg.pp.se * forwarding). 
50310153Sandreas@sandberg.pp.se */ 50410153Sandreas@sandberg.pp.se class ScopedMigration 50510153Sandreas@sandberg.pp.se { 50610153Sandreas@sandberg.pp.se public: 50710153Sandreas@sandberg.pp.se ScopedMigration(EventQueue *_new_eq) 50810153Sandreas@sandberg.pp.se : new_eq(*_new_eq), old_eq(*curEventQueue()) 50910153Sandreas@sandberg.pp.se { 51010153Sandreas@sandberg.pp.se old_eq.unlock(); 51110153Sandreas@sandberg.pp.se new_eq.lock(); 51210153Sandreas@sandberg.pp.se curEventQueue(&new_eq); 51310153Sandreas@sandberg.pp.se } 51410153Sandreas@sandberg.pp.se 51510153Sandreas@sandberg.pp.se ~ScopedMigration() 51610153Sandreas@sandberg.pp.se { 51710153Sandreas@sandberg.pp.se new_eq.unlock(); 51810153Sandreas@sandberg.pp.se old_eq.lock(); 51910153Sandreas@sandberg.pp.se curEventQueue(&old_eq); 52010153Sandreas@sandberg.pp.se } 52110153Sandreas@sandberg.pp.se 52210153Sandreas@sandberg.pp.se private: 52310153Sandreas@sandberg.pp.se EventQueue &new_eq; 52410153Sandreas@sandberg.pp.se EventQueue &old_eq; 52510153Sandreas@sandberg.pp.se }; 52610153Sandreas@sandberg.pp.se 52710153Sandreas@sandberg.pp.se /** 52810153Sandreas@sandberg.pp.se * Temporarily release the event queue service lock. 52910153Sandreas@sandberg.pp.se * 53010153Sandreas@sandberg.pp.se * There are cases where it is desirable to temporarily release 53110153Sandreas@sandberg.pp.se * the event queue lock to prevent deadlocks. For example, when 53210153Sandreas@sandberg.pp.se * waiting on the global barrier, we need to release the lock to 53310153Sandreas@sandberg.pp.se * prevent deadlocks from happening when another thread tries to 53410153Sandreas@sandberg.pp.se * temporarily take over the event queue waiting on the barrier. 
53510153Sandreas@sandberg.pp.se */ 53610153Sandreas@sandberg.pp.se class ScopedRelease 53710153Sandreas@sandberg.pp.se { 53810153Sandreas@sandberg.pp.se public: 53910153Sandreas@sandberg.pp.se ScopedRelease(EventQueue *_eq) 54010153Sandreas@sandberg.pp.se : eq(*_eq) 54110153Sandreas@sandberg.pp.se { 54210153Sandreas@sandberg.pp.se eq.unlock(); 54310153Sandreas@sandberg.pp.se } 54410153Sandreas@sandberg.pp.se 54510153Sandreas@sandberg.pp.se ~ScopedRelease() 54610153Sandreas@sandberg.pp.se { 54710153Sandreas@sandberg.pp.se eq.lock(); 54810153Sandreas@sandberg.pp.se } 54910153Sandreas@sandberg.pp.se 55010153Sandreas@sandberg.pp.se private: 55110153Sandreas@sandberg.pp.se EventQueue &eq; 55210153Sandreas@sandberg.pp.se }; 55310153Sandreas@sandberg.pp.se#endif 55410153Sandreas@sandberg.pp.se 5557063Snate@binkert.org EventQueue(const std::string &n); 5562SN/A 557512SN/A virtual const std::string name() const { return objName; } 5589983Sstever@gmail.com void name(const std::string &st) { objName = st; } 559265SN/A 5609983Sstever@gmail.com //! Schedule the given event on this queue. Safe to call from any 5619983Sstever@gmail.com //! thread. 5629983Sstever@gmail.com void schedule(Event *event, Tick when, bool global = false); 5639983Sstever@gmail.com 5649983Sstever@gmail.com //! Deschedule the specified event. Should be called only from the 5659983Sstever@gmail.com //! owning thread. 5665738Snate@binkert.org void deschedule(Event *event); 5679983Sstever@gmail.com 5689983Sstever@gmail.com //! Reschedule the specified event. Should be called only from 5699983Sstever@gmail.com //! the owning thread. 
5705738Snate@binkert.org void reschedule(Event *event, Tick when, bool always = false); 5712SN/A 5725501Snate@binkert.org Tick nextTick() const { return head->when(); } 5739356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { _curTick = newVal; } 57411015Sandreas.sandberg@arm.com Tick getCurTick() const { return _curTick; } 57510991Stimothy.jones@cl.cam.ac.uk Event *getHead() const { return head; } 5769356Snilay@cs.wisc.edu 5772667Sstever@eecs.umich.edu Event *serviceOne(); 5782SN/A 5792SN/A // process all events up to the given timestamp. we inline a 5802SN/A // quick test to see if there are any events to process; if so, 5812SN/A // call the internal out-of-line version to process them all. 5825501Snate@binkert.org void 5835501Snate@binkert.org serviceEvents(Tick when) 5845501Snate@binkert.org { 5852SN/A while (!empty()) { 5862SN/A if (nextTick() > when) 5872SN/A break; 5882SN/A 5891634SN/A /** 5901634SN/A * @todo this assert is a good bug catcher. I need to 5911634SN/A * make it true again. 5921634SN/A */ 5931634SN/A //assert(head->when() >= when && "event scheduled in the past"); 5942SN/A serviceOne(); 5952SN/A } 5969356Snilay@cs.wisc.edu 5979356Snilay@cs.wisc.edu setCurTick(when); 5982SN/A } 5992SN/A 6002SN/A // return true if no events are queued 6015501Snate@binkert.org bool empty() const { return head == NULL; } 6022SN/A 6035501Snate@binkert.org void dump() const; 6042SN/A 6055502Snate@binkert.org bool debugVerify() const; 6065502Snate@binkert.org 6079983Sstever@gmail.com //! Function for moving events from the async_queue to the main queue. 6089983Sstever@gmail.com void handleAsyncInsertions(); 6099983Sstever@gmail.com 6108648Snilay@cs.wisc.edu /** 61110476Sandreas.hansson@arm.com * Function to signal that the event loop should be woken up because 61210476Sandreas.hansson@arm.com * an event has been scheduled by an agent outside the gem5 event 61310476Sandreas.hansson@arm.com * loop(s) whose event insertion may not have been noticed by gem5. 
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 onto other
     * schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that have
     * already been scheduled. Already scheduled events can be processed
     * by replacing the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     *
     * @return The previous head, so it can later be restored with
     *         another call to replaceHead() (NOTE(review): confirm
     *         against the implementation in eventq.cc).
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
66110906Sandreas.sandberg@arm.com */ 66210906Sandreas.sandberg@arm.com void checkpointReschedule(Event *event); 66310906Sandreas.sandberg@arm.com 66410476Sandreas.hansson@arm.com virtual ~EventQueue() { } 6652SN/A}; 6662SN/A 6679554Sandreas.hansson@arm.comvoid dumpMainQueue(); 6689554Sandreas.hansson@arm.com 6695605Snate@binkert.org#ifndef SWIG 6705605Snate@binkert.orgclass EventManager 6715605Snate@binkert.org{ 6725605Snate@binkert.org protected: 6735605Snate@binkert.org /** A pointer to this object's event queue */ 6745605Snate@binkert.org EventQueue *eventq; 6752SN/A 6765605Snate@binkert.org public: 6779099Sandreas.hansson@arm.com EventManager(EventManager &em) : eventq(em.eventq) {} 6789159Sandreas.hansson@arm.com EventManager(EventManager *em) : eventq(em->eventq) {} 6795605Snate@binkert.org EventManager(EventQueue *eq) : eventq(eq) {} 6802SN/A 6815605Snate@binkert.org EventQueue * 6829099Sandreas.hansson@arm.com eventQueue() const 6837060Snate@binkert.org { 6847060Snate@binkert.org return eventq; 6857060Snate@binkert.org } 6867060Snate@binkert.org 6875605Snate@binkert.org void 6885605Snate@binkert.org schedule(Event &event, Tick when) 6895605Snate@binkert.org { 6905605Snate@binkert.org eventq->schedule(&event, when); 6915605Snate@binkert.org } 6925605Snate@binkert.org 6935605Snate@binkert.org void 6945605Snate@binkert.org deschedule(Event &event) 6955605Snate@binkert.org { 6965605Snate@binkert.org eventq->deschedule(&event); 6975605Snate@binkert.org } 6985605Snate@binkert.org 6995605Snate@binkert.org void 7005605Snate@binkert.org reschedule(Event &event, Tick when, bool always = false) 7015605Snate@binkert.org { 7025605Snate@binkert.org eventq->reschedule(&event, when, always); 7035605Snate@binkert.org } 7045605Snate@binkert.org 7055605Snate@binkert.org void 7065605Snate@binkert.org schedule(Event *event, Tick when) 7075605Snate@binkert.org { 7085605Snate@binkert.org eventq->schedule(event, when); 7095605Snate@binkert.org } 7105605Snate@binkert.org 
7115605Snate@binkert.org void 7125605Snate@binkert.org deschedule(Event *event) 7135605Snate@binkert.org { 7145605Snate@binkert.org eventq->deschedule(event); 7155605Snate@binkert.org } 7165605Snate@binkert.org 7175605Snate@binkert.org void 7185605Snate@binkert.org reschedule(Event *event, Tick when, bool always = false) 7195605Snate@binkert.org { 7205605Snate@binkert.org eventq->reschedule(event, when, always); 7215605Snate@binkert.org } 7229356Snilay@cs.wisc.edu 72310476Sandreas.hansson@arm.com void wakeupEventQueue(Tick when = (Tick)-1) 72410476Sandreas.hansson@arm.com { 72510476Sandreas.hansson@arm.com eventq->wakeup(when); 72610476Sandreas.hansson@arm.com } 72710476Sandreas.hansson@arm.com 7289356Snilay@cs.wisc.edu void setCurTick(Tick newVal) { eventq->setCurTick(newVal); } 7295605Snate@binkert.org}; 7305605Snate@binkert.org 7317005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7327005Snate@binkert.orgvoid 7337005Snate@binkert.orgDelayFunction(EventQueue *eventq, Tick when, T *object) 7345502Snate@binkert.org{ 7357005Snate@binkert.org class DelayEvent : public Event 7367005Snate@binkert.org { 7377005Snate@binkert.org private: 7387005Snate@binkert.org T *object; 7397005Snate@binkert.org 7407005Snate@binkert.org public: 7417005Snate@binkert.org DelayEvent(T *o) 7428581Ssteve.reinhardt@amd.com : Event(Default_Pri, AutoDelete), object(o) 7438581Ssteve.reinhardt@amd.com { } 7447005Snate@binkert.org void process() { (object->*F)(); } 7457005Snate@binkert.org const char *description() const { return "delay"; } 7467005Snate@binkert.org }; 7477005Snate@binkert.org 7487005Snate@binkert.org eventq->schedule(new DelayEvent(object), when); 7495502Snate@binkert.org} 7505502Snate@binkert.org 7517005Snate@binkert.orgtemplate <class T, void (T::* F)()> 7527005Snate@binkert.orgclass EventWrapper : public Event 7535502Snate@binkert.org{ 7547005Snate@binkert.org private: 7557005Snate@binkert.org T *object; 7565502Snate@binkert.org 7577005Snate@binkert.org public: 
7587005Snate@binkert.org EventWrapper(T *obj, bool del = false, Priority p = Default_Pri) 7597005Snate@binkert.org : Event(p), object(obj) 7607005Snate@binkert.org { 7617005Snate@binkert.org if (del) 7627005Snate@binkert.org setFlags(AutoDelete); 7637005Snate@binkert.org } 7645502Snate@binkert.org 7657066Snate@binkert.org EventWrapper(T &obj, bool del = false, Priority p = Default_Pri) 7667066Snate@binkert.org : Event(p), object(&obj) 7677066Snate@binkert.org { 7687066Snate@binkert.org if (del) 7697066Snate@binkert.org setFlags(AutoDelete); 7707066Snate@binkert.org } 7717066Snate@binkert.org 7727005Snate@binkert.org void process() { (object->*F)(); } 7735502Snate@binkert.org 7747005Snate@binkert.org const std::string 7757005Snate@binkert.org name() const 7767005Snate@binkert.org { 7777005Snate@binkert.org return object->name() + ".wrapped_event"; 7787005Snate@binkert.org } 7797005Snate@binkert.org 7807005Snate@binkert.org const char *description() const { return "EventWrapped"; } 7817005Snate@binkert.org}; 7825605Snate@binkert.org#endif 7832SN/A 7841354SN/A#endif // __SIM_EVENTQ_HH__ 785