/*
 * Copyright (c) 2000-2005 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

/* @file
 * EventQueue interfaces
 */

#ifndef __SIM_EVENTQ_HH__
#define __SIM_EVENTQ_HH__

#include <algorithm>
#include <cassert>
#include <climits>
#include <functional>
#include <iosfwd>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "base/flags.hh"
#include "base/types.hh"
#include "debug/Event.hh"
#include "sim/serialize.hh"

class EventQueue;       // forward declaration
class BaseGlobalEvent;

//! Simulation Quantum for multiple eventq simulation.
//! The quantum value is the period length after which the queues
//! synchronize themselves with each other. This means that any
//! event scheduled on Queue A that is generated by an event on
//! Queue B should be at least simQuantum ticks in the future.
extern Tick simQuantum;

//! Current number of allocated main event queues.
extern uint32_t numMainEventQueues;

//! Array for main event queues.
extern std::vector<EventQueue *> mainEventQueue;

//! The current event queue for the running thread. Access to this queue
//! does not require any locking from the thread.
extern __thread EventQueue *_curEventQueue;

//! Current mode of execution: parallel / serial
extern bool inParallelMode;

//! Function for returning the event queue for the provided
//! index. The function allocates a new queue if one does not
//! already exist for the index, provided that the index is
//! within bounds.
EventQueue *getEventQueue(uint32_t index);

inline EventQueue *curEventQueue() { return _curEventQueue; }
inline void curEventQueue(EventQueue *q) { _curEventQueue = q; }

/**
 * Common base class for Event and GlobalEvent, so they can share flag
 * and priority definitions and accessor functions. This class should
 * not be used directly.
 */
class EventBase
{
  protected:
    typedef unsigned short FlagsType;
    typedef ::Flags<FlagsType> Flags;

    static const FlagsType PublicRead  = 0x003f; // public readable flags
    static const FlagsType PublicWrite = 0x001d; // public writable flags
    static const FlagsType Squashed    = 0x0001; // has been squashed
    static const FlagsType Scheduled   = 0x0002; // has been scheduled
    static const FlagsType Managed     = 0x0004; // Use life cycle manager
    static const FlagsType AutoDelete  = Managed; // delete after dispatch
    /**
     * This used to be AutoSerialize. This value can't be reused
     * without changing the checkpoint version since the flag field
     * gets serialized.
     */
    static const FlagsType Reserved0   = 0x0008;
    static const FlagsType IsExitEvent = 0x0010; // special exit event
    static const FlagsType IsMainQueue = 0x0020; // on main event queue
    static const FlagsType Initialized = 0x7a40; // somewhat random bits
    static const FlagsType InitMask    = 0xffc0; // mask for init bits

  public:
    typedef int8_t Priority;

    /// Event priorities, to provide tie-breakers for events scheduled
    /// at the same cycle. Most events are scheduled at the default
    /// priority; these values are used to control events that need to
    /// be ordered within a cycle.

    /// Minimum priority
    static const Priority Minimum_Pri = SCHAR_MIN;

    /// If we enable tracing on a particular cycle, do that as the
    /// very first thing so we don't miss any of the events on
    /// that cycle (even if we enter the debugger).
    static const Priority Debug_Enable_Pri = -101;

    /// Breakpoints should happen before anything else (except
    /// enabling trace output), so we don't miss any action when
    /// debugging.
    static const Priority Debug_Break_Pri = -100;

    /// CPU switches schedule the new CPU's tick event for the
    /// same cycle (after unscheduling the old CPU's tick event).
    /// The switch needs to come before any tick events to make
    /// sure we don't tick both CPUs in the same cycle.
    static const Priority CPU_Switch_Pri = -31;

    /// For some reason "delayed" inter-cluster writebacks are
    /// scheduled before regular writebacks (which have default
    /// priority). Steve?
    static const Priority Delayed_Writeback_Pri = -1;

    /// Default is zero for historical reasons.
    static const Priority Default_Pri = 0;

    /// The DVFS update event leads to a stats dump, so it is given a
    /// lower priority (a larger numerical value, i.e., it runs after
    /// default-priority events) to ensure all relevant state has been
    /// updated first.
    static const Priority DVFS_Update_Pri = 31;

    /// Serialization needs to occur before tick events also, so
    /// that a serialize/unserialize is identical to an on-line
    /// CPU switch.
    static const Priority Serialize_Pri = 32;

    /// CPU ticks must come after other associated CPU events
    /// (such as writebacks).
    static const Priority CPU_Tick_Pri = 50;

    /// Statistics events (dump, reset, etc.) come after
    /// everything else, but before exit.
    static const Priority Stat_Event_Pri = 90;

    /// Progress events come at the end.
    static const Priority Progress_Event_Pri = 95;

    /// If we want to exit on this cycle, it's the very last thing
    /// we do.
    static const Priority Sim_Exit_Pri = 100;

    /// Maximum priority
    static const Priority Maximum_Pri = SCHAR_MAX;
};
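
// Illustrative sketch (not part of the original header): priorities only
// break ties between events scheduled at the same tick. Numerically
// smaller priority values run earlier within a cycle. For example, with
// a hypothetical queue 'eq' and events 'cpuTickEvent' (CPU_Tick_Pri = 50)
// and 'statDumpEvent' (Stat_Event_Pri = 90) both scheduled at tick 't':
//
//     eq->schedule(&statDumpEvent, t);
//     eq->schedule(&cpuTickEvent, t);
//     // serviceOne() processes cpuTickEvent before statDumpEvent,
//     // regardless of the order of the schedule() calls.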

/*
 * An item on an event queue. The action caused by a given
 * event is specified by deriving a subclass and overriding the
 * process() member function.
 *
 * Caution, the order of members is chosen to maximize data packing.
 */
class Event : public EventBase, public Serializable
{
    friend class EventQueue;

  private:
    // The event queue is now a linked list of linked lists. The
    // 'nextBin' pointer is to find the bin, where a bin is defined as
    // when+priority. All events in the same bin will be stored in a
    // second linked list (a stack) maintained by the 'nextInBin'
    // pointer. The list will be accessed in LIFO order. The end
    // result is that the insert/removal in 'nextBin' is
    // linear/constant, and the lookup/removal in 'nextInBin' is
    // constant/constant. Hopefully this is a significant improvement
    // over the previous fully linear insertion.
    Event *nextBin;
    Event *nextInBin;

    static Event *insertBefore(Event *event, Event *curr);
    static Event *removeItem(Event *event, Event *last);

    Tick _when;         //!< timestamp when event should be processed
    Priority _priority; //!< event priority
    Flags flags;

#ifndef NDEBUG
    /// Global counter to generate unique IDs for Event instances
    static Counter instanceCounter;

    /// This event's unique ID. We could also use pointer values for
    /// this, but they're not consistent across runs, making debugging
    /// more difficult. Thus we use a global counter value when
    /// debugging.
    Counter instance;

    /// queue to which this event belongs (though it may or may not be
    /// scheduled on this queue yet)
    EventQueue *queue;
#endif

#ifdef EVENTQ_DEBUG
    Tick whenCreated;   //!< time created
    Tick whenScheduled; //!< time scheduled
#endif

    void
    setWhen(Tick when, EventQueue *q)
    {
        _when = when;
#ifndef NDEBUG
        queue = q;
#endif
#ifdef EVENTQ_DEBUG
        whenScheduled = curTick();
#endif
    }

    bool
    initialized() const
    {
        return (flags & InitMask) == Initialized;
    }

  protected:
    /// Accessor for flags.
    Flags
    getFlags() const
    {
        return flags & PublicRead;
    }

    bool
    isFlagSet(Flags _flags) const
    {
        assert(_flags.noneSet(~PublicRead));
        return flags.isSet(_flags);
    }

    /// Accessor for flags.
    void
    setFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.set(_flags);
    }

    void
    clearFlags(Flags _flags)
    {
        assert(_flags.noneSet(~PublicWrite));
        flags.clear(_flags);
    }

    void
    clearFlags()
    {
        flags.clear(PublicWrite);
    }

    // This function isn't really useful if TRACING_ON is not defined
    virtual void trace(const char *action); //!< trace event activity

  protected: /* Memory management */
    /**
     * @{
     * Memory management hooks for events that have the Managed flag set
     *
     * Events can use automatic memory management by setting the
     * Managed flag. The default implementation automatically deletes
     * events once they have been removed from the event queue. This
     * typically happens when events are descheduled or have been
     * triggered and not rescheduled.
     *
     * The methods below may be overridden by events that need custom
     * memory management. For example, events exported to Python need
     * to implement reference counting to ensure that the Python
     * implementation of the event is kept alive while it lives in the
     * event queue.
     *
     * @note Memory managers are responsible for implementing
     * reference counting (by overriding both acquireImpl() and
     * releaseImpl()) or checking if an event is no longer scheduled
     * in releaseImpl() before deallocating it.
     */

    /**
     * Managed event scheduled and being held in the event queue.
     */
    void acquire()
    {
        if (flags.isSet(Event::Managed))
            acquireImpl();
    }

    /**
     * Managed event removed from the event queue.
     */
    void release() {
        if (flags.isSet(Event::Managed))
            releaseImpl();
    }

    virtual void acquireImpl() {}

    virtual void releaseImpl() {
        if (!scheduled())
            delete this;
    }

    /** @} */

  public:

    /*
     * Event constructor
     * @param p Event priority
     * @param f Initial public flags (e.g., AutoDelete)
     */
    Event(Priority p = Default_Pri, Flags f = 0)
        : nextBin(nullptr), nextInBin(nullptr), _when(0), _priority(p),
          flags(Initialized | f)
    {
        assert(f.noneSet(~PublicWrite));
#ifndef NDEBUG
        instance = ++instanceCounter;
        queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
        whenCreated = curTick();
        whenScheduled = 0;
#endif
    }

    virtual ~Event();
    virtual const std::string name() const;

    /// Return a C string describing the event. This string should
    /// *not* be dynamically allocated; just a const char array
    /// describing the event class.
    virtual const char *description() const;

    /// Dump the current event data
    void dump() const;

  public:
    /*
     * This member function is invoked when the event is processed
     * (occurs). There is no default implementation; each subclass
     * must provide its own implementation. The event is not
     * automatically deleted after it is processed (to allow for
     * statically allocated event objects).
     *
     * If the AutoDelete flag is set, the object is deleted once it
     * is processed.
     */
    virtual void process() = 0;

    /// Determine if the current event is scheduled
    bool scheduled() const { return flags.isSet(Scheduled); }

    /// Squash the current event
    void squash() { flags.set(Squashed); }

    /// Check whether the event is squashed
    bool squashed() const { return flags.isSet(Squashed); }

    /// See if this is a SimExitEvent (without resorting to RTTI)
    bool isExitEvent() const { return flags.isSet(IsExitEvent); }

    /// Check whether this event will auto-delete
    bool isManaged() const { return flags.isSet(Managed); }
    bool isAutoDelete() const { return isManaged(); }

    /// Get the time that the event is scheduled
    Tick when() const { return _when; }

    /// Get the event priority
    Priority priority() const { return _priority; }

    //! If this is part of a GlobalEvent, return the pointer to the
    //! Global Event. By default, there is no GlobalEvent, so return
    //! NULL. (Overridden in GlobalEvent::BarrierEvent.)
    virtual BaseGlobalEvent *globalEvent() { return NULL; }

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

inline bool
operator<(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() < r.priority());
}

inline bool
operator>(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() > r.priority());
}

inline bool
operator<=(const Event &l, const Event &r)
{
    return l.when() < r.when() ||
        (l.when() == r.when() && l.priority() <= r.priority());
}

inline bool
operator>=(const Event &l, const Event &r)
{
    return l.when() > r.when() ||
        (l.when() == r.when() && l.priority() >= r.priority());
}

inline bool
operator==(const Event &l, const Event &r)
{
    return l.when() == r.when() && l.priority() == r.priority();
}

inline bool
operator!=(const Event &l, const Event &r)
{
    return l.when() != r.when() || l.priority() != r.priority();
}
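
// Illustrative sketch (not part of the original header): the typical way
// to define a concrete event is to subclass Event and override process().
// 'MyObject' and 'doSomething' below are hypothetical names.
//
//     class MyEvent : public Event
//     {
//       private:
//         MyObject *obj;
//
//       public:
//         MyEvent(MyObject *o) : Event(Default_Pri), obj(o) {}
//         void process() override { obj->doSomething(); }
//         const char *description() const override { return "my event"; }
//     };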

/**
 * Queue of events sorted in time order
 *
 * Events are scheduled (inserted into the event queue) using the
 * schedule() method. This method inserts either a <i>synchronous</i>
 * or an <i>asynchronous</i> event.
 *
 * Synchronous events are scheduled using the schedule() method with
 * the argument 'global' set to false (the default). This should only
 * be done from a thread holding the event queue lock
 * (EventQueue::service_mutex). The lock is always held when an event
 * handler is called; it can therefore always insert events into its
 * own event queue unless it voluntarily releases the lock.
 *
 * Events can be scheduled across thread (and event queue) borders by
 * either scheduling asynchronous events or taking the target event
 * queue's lock. However, the lock should <i>never</i> be taken
 * directly since this is likely to cause deadlocks. Instead, code
 * that needs to schedule events in other event queues should
 * temporarily release its own queue and lock the new queue. This
 * prevents deadlocks since a single thread never owns more than one
 * event queue lock. This functionality is provided by the
 * ScopedMigration helper class. Note that temporarily migrating
 * between event queues can make the simulation non-deterministic; it
 * should therefore be limited to cases where that can be tolerated
 * (e.g., handling asynchronous IO or fast-forwarding in KVM).
 *
 * Asynchronous events can also be scheduled using the normal
 * schedule() method with the 'global' parameter set to true. Unlike
 * the queue migration strategy described above, this strategy is
 * fully deterministic. It causes the event to be inserted in a
 * separate queue of asynchronous events (async_queue), which is
 * merged into the main event queue at the end of each simulation
 * quantum (by calling the handleAsyncInsertions() method). Note that
 * this implies that such events must happen at least one simulation
 * quantum into the future; otherwise they risk being scheduled in the
 * past by handleAsyncInsertions().
 */
class EventQueue
{
  private:
    std::string objName;
    Event *head;
    Tick _curTick;

    //! Mutex to protect async queue.
    std::mutex async_queue_mutex;

    //! List of events added by other threads to this event queue.
    std::list<Event*> async_queue;

    /**
     * Lock protecting event handling.
     *
     * This lock is always taken when servicing events. It is assumed
     * that the thread scheduling new events (not asynchronous events
     * though) has taken this lock. This is normally done by
     * serviceOne() since new events are typically scheduled as a
     * response to an earlier event.
     *
     * This lock is intended to be used to temporarily steal an event
     * queue to support inter-thread communication when some
     * deterministic timing can be sacrificed for speed. For example,
     * the KVM CPU can use this support to access devices running in a
     * different thread.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue::lock()
     * @see EventQueue::unlock()
     */
    std::mutex service_mutex;

    //! Insert / remove event from the queue. Should only be called
    //! by the thread operating this queue.
    void insert(Event *event);
    void remove(Event *event);

    //! Function for adding events to the async queue. The added events
    //! are added to the main event queue later. Threads other than the
    //! owning thread should call this function instead of insert().
    void asyncInsert(Event *event);

    EventQueue(const EventQueue &);

  public:
    /**
     * Temporarily migrate execution to a different event queue.
     *
     * An instance of this class temporarily migrates execution to a
     * different event queue by releasing the current queue, locking
     * the new queue, and updating curEventQueue(). This can, for
     * example, be useful when performing IO across thread event
     * queues when timing is not crucial (e.g., during fast
     * forwarding).
     *
     * ScopedMigration does nothing if both queues are the same.
     */
    class ScopedMigration
    {
      public:
        ScopedMigration(EventQueue *_new_eq, bool _doMigrate = true)
            : new_eq(*_new_eq), old_eq(*curEventQueue()),
              doMigrate((&new_eq != &old_eq) && _doMigrate)
        {
            if (doMigrate) {
                old_eq.unlock();
                new_eq.lock();
                curEventQueue(&new_eq);
            }
        }

        ~ScopedMigration()
        {
            if (doMigrate) {
                new_eq.unlock();
                old_eq.lock();
                curEventQueue(&old_eq);
            }
        }

      private:
        EventQueue &new_eq;
        EventQueue &old_eq;
        bool doMigrate;
    };
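
    // Illustrative sketch (not part of the original header): migrate to
    // another queue for the duration of a scope. 'deviceQueue', 'dev',
    // and 'pkt' are hypothetical names.
    //
    //     {
    //         EventQueue::ScopedMigration migrate(deviceQueue);
    //         // We now hold deviceQueue's lock and curEventQueue()
    //         // points to it, so we can safely touch objects that
    //         // live on that queue.
    //         dev->access(pkt);
    //     } // the destructor re-locks and restores the original queue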

    /**
     * Temporarily release the event queue service lock.
     *
     * There are cases where it is desirable to temporarily release
     * the event queue lock to prevent deadlocks. For example, when
     * waiting on the global barrier, we need to release the lock to
     * prevent deadlocks from happening when another thread tries to
     * temporarily take over the event queue while waiting on the
     * barrier.
     */
    class ScopedRelease
    {
      public:
        ScopedRelease(EventQueue *_eq)
            : eq(*_eq)
        {
            eq.unlock();
        }

        ~ScopedRelease()
        {
            eq.lock();
        }

      private:
        EventQueue &eq;
    };

    EventQueue(const std::string &n);

    virtual const std::string name() const { return objName; }
    void name(const std::string &st) { objName = st; }

    //! Schedule the given event on this queue. Safe to call from any
    //! thread.
    void schedule(Event *event, Tick when, bool global = false);

    //! Deschedule the specified event. Should be called only from the
    //! owning thread.
    void deschedule(Event *event);

    //! Reschedule the specified event. Should be called only from
    //! the owning thread.
    void reschedule(Event *event, Tick when, bool always = false);

    Tick nextTick() const { return head->when(); }
    void setCurTick(Tick newVal) { _curTick = newVal; }
    Tick getCurTick() const { return _curTick; }
    Event *getHead() const { return head; }

    Event *serviceOne();

    // Process all events up to the given timestamp. We inline a
    // quick test to see if there are any events to process; if so,
    // call the out-of-line serviceOne() to process them one by one.
    void
    serviceEvents(Tick when)
    {
        while (!empty()) {
            if (nextTick() > when)
                break;

            /**
             * @todo this assert is a good bug catcher. I need to
             * make it true again.
             */
            //assert(head->when() >= when && "event scheduled in the past");
            serviceOne();
        }

        setCurTick(when);
    }

    // return true if no events are queued
    bool empty() const { return head == NULL; }

    void dump() const;

    bool debugVerify() const;

    //! Function for moving events from the async_queue to the main queue.
    void handleAsyncInsertions();

    /**
     * Function to signal that the event loop should be woken up because
     * an event has been scheduled by an agent outside the gem5 event
     * loop(s) whose event insertion may not have been noticed by gem5.
     * This function isn't needed by the usual gem5 event loop but may
     * be necessary in derived EventQueues which host gem5 on other
     * schedulers.
     *
     * @param when Time of a delayed wakeup (if known). This parameter
     * can be used by an implementation to schedule a wakeup in the
     * future if it is sure it will remain active until then.
     * Or it can be ignored and the event queue can be woken up now.
     */
    virtual void wakeup(Tick when = (Tick)-1) { }

    /**
     * Function for replacing the head of the event queue, so that a
     * different set of events can run without disturbing events that
     * have already been scheduled. Already scheduled events can be
     * processed by putting the original head back.
     * USING THIS FUNCTION CAN BE DANGEROUS TO THE HEALTH OF THE SIMULATOR.
     * NOT RECOMMENDED FOR USE.
     */
    Event* replaceHead(Event* s);

    /**@{*/
    /**
     * Provide an interface for locking/unlocking the event queue.
     *
     * @warn Do NOT use these methods directly unless you really know
     * what you are doing. Incorrect use can easily lead to simulator
     * deadlocks.
     *
     * @see EventQueue::ScopedMigration.
     * @see EventQueue::ScopedRelease
     * @see EventQueue
     */
    void lock() { service_mutex.lock(); }
    void unlock() { service_mutex.unlock(); }
    /**@}*/

    /**
     * Reschedule an event after a checkpoint.
     *
     * Since events don't know which event queue they belong to,
     * parent objects need to reschedule events themselves. This
     * method conditionally schedules an event that has the Scheduled
     * flag set. It should be called by parent objects after
     * unserializing an object.
     *
     * @warn Only use this method after unserializing an Event.
     */
    void checkpointReschedule(Event *event);

    virtual ~EventQueue() { }
};
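
// Illustrative sketch (not part of the original header): scheduling an
// event on another thread's queue with 'global' set to true places it
// on that queue's async_queue; handleAsyncInsertions() later merges it
// into the main queue. 'otherQueue' and 'ev' are hypothetical names.
//
//     // From a thread that does not own otherQueue:
//     otherQueue->schedule(&ev, curTick() + simQuantum, true);
//     // The event must be at least one simulation quantum in the
//     // future, or it may end up in the past when the queue merges
//     // its async insertions at the quantum boundary.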

void dumpMainQueue();

/**
 * Helper base class that forwards scheduling operations to an
 * associated event queue. Classes that own events (e.g., SimObject)
 * can inherit from EventManager to schedule, deschedule, and
 * reschedule events on their queue without referring to it directly.
 */
class EventManager
{
  protected:
    /** A pointer to this object's event queue */
    EventQueue *eventq;

  public:
    EventManager(EventManager &em) : eventq(em.eventq) {}
    EventManager(EventManager *em) : eventq(em->eventq) {}
    EventManager(EventQueue *eq) : eventq(eq) {}

    EventQueue *
    eventQueue() const
    {
        return eventq;
    }

    void
    schedule(Event &event, Tick when)
    {
        eventq->schedule(&event, when);
    }

    void
    deschedule(Event &event)
    {
        eventq->deschedule(&event);
    }

    void
    reschedule(Event &event, Tick when, bool always = false)
    {
        eventq->reschedule(&event, when, always);
    }

    void
    schedule(Event *event, Tick when)
    {
        eventq->schedule(event, when);
    }

    void
    deschedule(Event *event)
    {
        eventq->deschedule(event);
    }

    void
    reschedule(Event *event, Tick when, bool always = false)
    {
        eventq->reschedule(event, when, always);
    }

    void wakeupEventQueue(Tick when = (Tick)-1)
    {
        eventq->wakeup(when);
    }

    void setCurTick(Tick newVal) { eventq->setCurTick(newVal); }
};
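
// Illustrative sketch (not part of the original header): a class that
// inherits from EventManager can schedule its events without touching
// the queue directly. 'MyObject' and 'MyEvent' (sketched earlier) are
// hypothetical names.
//
//     class MyObject : public EventManager
//     {
//       private:
//         MyEvent event;
//
//       public:
//         MyObject(EventQueue *q) : EventManager(q), event(this) {}
//         void kick() { schedule(event, curTick() + 1000); }
//     };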

/**
 * Event wrapper that invokes the member function F on object T when
 * the event is processed, avoiding the need to define a one-off Event
 * subclass for simple callbacks.
 */
template <class T, void (T::* F)()>
class EventWrapper : public Event
{
  private:
    T *object;

  public:
    EventWrapper(T *obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    EventWrapper(T &obj, bool del = false, Priority p = Default_Pri)
        : Event(p), object(&obj)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { (object->*F)(); }

    const std::string
    name() const
    {
        return object->name() + ".wrapped_event";
    }

    const char *description() const { return "EventWrapped"; }
};
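
// Illustrative sketch (not part of the original header): wrap a member
// function as an event without writing a subclass. 'Widget::tick' is a
// hypothetical nullary member function, and the surrounding class is
// assumed to inherit from EventManager.
//
//     EventWrapper<Widget, &Widget::tick> tickEvent(this);
//     schedule(tickEvent, curTick() + 500);
//     // tick() is invoked on this object when the event is processed.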

/**
 * Event wrapper that invokes an arbitrary std::function callback (such
 * as a lambda) when the event is processed.
 */
class EventFunctionWrapper : public Event
{
  private:
    std::function<void(void)> callback;
    std::string _name;

  public:
    EventFunctionWrapper(const std::function<void(void)> &callback,
                         const std::string &name,
                         bool del = false,
                         Priority p = Default_Pri)
        : Event(p), callback(callback), _name(name)
    {
        if (del)
            setFlags(AutoDelete);
    }

    void process() { callback(); }

    const std::string
    name() const
    {
        return _name + ".wrapped_function_event";
    }

    const char *description() const { return "EventFunctionWrapped"; }
};
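
// Illustrative sketch (not part of the original header): wrap a lambda
// as an event; the callback runs each time the event is processed.
// 'processTimeout' and 'delay' are hypothetical names.
//
//     EventFunctionWrapper ev([this]{ processTimeout(); }, name());
//     schedule(ev, curTick() + delay);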

#endif // __SIM_EVENTQ_HH__