// dist_iface.hh revision 11263
1/*
2 * Copyright (c) 2015 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Gabor Dozsa
38 */
39
40/* @file
41 * The interface class for multi gem5 simulations.
42 *
 * Multi gem5 is an extension to gem5 to enable parallel simulation of a
 * distributed system (e.g. simulation of a pool of machines
 * connected by Ethernet links). A multi gem5 run consists of separate gem5
 * processes running in parallel. Each gem5 process executes
 * the simulation of a component of the simulated distributed system.
48 * (An example component can be a multi-core board with an Ethernet NIC.)
49 * The MultiIface class below provides services to transfer data and
50 * control messages among the gem5 processes. The main such services are
51 * as follows.
52 *
53 * 1. Send a data packet coming from a simulated Ethernet link. The packet
54 * will be transferred to (all) the target(s) gem5 processes. The send
55 * operation is always performed by the simulation thread, i.e. the gem5
56 * thread that is processing the event queue associated with the simulated
57 * Ethernet link.
58 *
 * 2. Spawn a receiver thread to process messages coming in from
 * other gem5 processes. Each simulated Ethernet link has its own
 * associated receiver thread. The receiver thread saves the incoming packet
 * and schedules an appropriate receive event in the event queue.
63 *
 * 3. Schedule a global barrier event periodically to keep the gem5
 * processes in sync. The basic idea is that no gem5 process can run
 * ahead of its peers by more than the simulated link transmission delay.
 * This ensures that a corresponding receive event can always be
 * scheduled for any message coming in from a peer gem5 process.
 *
 * This interface is an abstract class (sendRaw() and recvRaw()
 * methods are pure virtual). It can work with various low level
 * send/receive service implementations (e.g. TCP/IP, MPI,...). A TCP
 * stream socket version is implemented in src/dev/net/tcp_iface.[hh,cc].
77 */
78#ifndef __DEV_NET_MULTI_IFACE_HH__
79#define __DEV_NET_MULTI_IFACE_HH__
80
#include <array>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "dev/net/etherpkt.hh"
#include "dev/net/multi_packet.hh"
#include "sim/core.hh"
#include "sim/drain.hh"
#include "sim/global_event.hh"
92
93class EventManager;
94
95/**
96 * The interface class to talk to peer gem5 processes.
97 */
98class MultiIface : public Drainable
99{
100  public:
101    /*!
102     * The possible reasons a multi sync among gem5 peers is needed for.
103     */
104    enum
105    class SyncTrigger {
106        periodic, /*!< Regular periodic sync. This can be interrupted by a
107                   checkpoint sync request */
108        ckpt,     /*!< sync before taking a checkpoint */
109        atomic    /*!< sync that cannot be interrupted (e.g. sync at startup) */
110    };
111
112  private:
113    typedef MultiHeaderPkt::MsgType MsgType;
114
115    /** Sync State-Machine
116     \dot
117     digraph Sync {
118     node [shape=box, fontsize=10];
119     idle -> busy
120     [ label="new trigger\n by run()" fontsize=8 ];
121     busy -> busy
122     [ label="new message by progress():\n(msg == SyncAck &&\nwaitNum > 1) || \n(msg==CkptSyncReq &&\ntrigger == ckpt)" fontsize=8 ];
123     busy -> idle
124     [ label="new message by progress():\n(msg == SyncAck &&\nwaitNum == 1)" fontsize=8 ];
125     busy -> interrupted
126     [ label="new message by progress():\n(msg == CkptSyncReq &&\ntrigger == periodic)" fontsize=8 ];
127     idle -> asyncCkpt
128     [ label="new message by progress():\nmsg == CkptSyncReq" fontsize=8 ];
129     asyncCkpt -> asyncCkpt
130     [ label="new message by progress():\nmsg == CkptSyncReq" fontsize=8 ];
131     asyncCkpt -> busy
132     [ label="new trigger by run():\ntrigger == ckpt" fontsize=8 ];
133     asyncCkpt -> idle
134     [ label="new trigger by run():\n(trigger == periodic &&\nwaitNum == 0) " fontsize=8 ];
135     asyncCkpt -> interrupted
136     [ label="new trigger by run():\n(trigger == periodic &&\nwaitNum > 0) " fontsize=8 ];
137     interrupted -> interrupted
138     [ label="new message by progress():\n(msg == CkptSyncReq &&\nwaitNum > 1)" fontsize=8 ];
139     interrupted -> idle
140     [ label="new message by progress():\n(msg == CkptSyncReq &&\nwaitNum == 1)" fontsize=8 ];
141     }
142     \enddot
143     */
144    /** @class Sync
145     * This class implements global sync operations among gem5 peer processes.
146     *
147     * @note This class is used as a singleton object (shared by all MultiIface
148     * objects).
149     */
150    class Sync
151    {
152      private:
153        /*!
154         * Internal state of the sync singleton object.
155         */
156        enum class SyncState {
157            busy,        /*!< There is an on-going sync. */
158            interrupted, /*!< An on-going periodic sync was interrupted. */
159            asyncCkpt,   /*!< A checkpoint (sim_exit) is already scheduled */
160            idle         /*!< There is no active sync. */
161        };
162        /**
163         * The lock to protect access to the MultiSync object.
164         */
165        std::mutex lock;
166        /**
167         * Condition variable for the simulation thread to wait on
168         * until all receiver threads completes the current global
169         * synchronisation.
170         */
171        std::condition_variable cv;
172        /**
173         * Number of receiver threads that not yet completed the current global
174         * synchronisation.
175         */
176        unsigned waitNum;
177        /**
178         * The trigger for the most recent sync.
179         */
180        SyncTrigger trigger;
181        /**
182         * Map sync triggers to request messages.
183         */
184        std::array<MsgType, 3> triggerToMsg = {{
185                MsgType::cmdPeriodicSyncReq,
186                MsgType::cmdCkptSyncReq,
187                MsgType::cmdAtomicSyncReq
188            }};
189
190        /**
191         * Current sync state.
192         */
193        SyncState state;
194
195      public:
196        /**
197         *  Core method to perform a full multi sync.
198         *
199         * @param t Sync trigger.
200         * @param sync_tick The tick the sync was expected to happen at.
201         * @return true if the sync completed, false if it was interrupted.
202         *
203         * @note In case of an interrupted periodic sync, sync_tick can be less
204         * than curTick() when we resume (i.e. re-run) it
205         */
206        bool run(SyncTrigger t, Tick sync_tick);
207        /**
208         * Callback when the receiver thread gets a sync message.
209         */
210        void progress(MsgType m);
211
212        Sync() : waitNum(0), state(SyncState::idle) {}
213        ~Sync() {}
214    };
215
216
217    /**
218     * The global event to schedule peridic multi sync. It is used as a
219     * singleton object.
220     *
221     * The periodic synchronisation works as follows.
222     * 1. A MultisyncEvent is scheduled as a global event when startup() is
223     * called.
224     * 2. The progress() method of the MultisyncEvent initiates a new barrier
225     * for each simulated Ethernet links.
226     * 3. Simulation thread(s) then waits until all receiver threads
227     * completes the ongoing barrier. The global sync event is done.
228     */
229    class SyncEvent : public GlobalSyncEvent
230    {
231      public:
232        /**
233         * Flag to indicate that the most recent periodic sync was interrupted
234         * (by a checkpoint request).
235         */
236        bool interrupted;
237        /**
238         * The tick when the most recent periodic synchronisation was scheduled
239         * at.
240         */
241        Tick scheduledAt;
242        /**
243         * Flag to indicate an on-going drain cycle.
244         */
245         bool isDraining;
246
247      public:
248        /**
249         * Only the firstly instanstiated MultiIface object will
250         * call this constructor.
251         */
252        SyncEvent() : GlobalSyncEvent(Default_Pri, 0), interrupted(false),
253                      scheduledAt(0), isDraining(false) {}
254
255        ~SyncEvent() { assert (scheduled() == false); }
256        /**
257         * Schedule the first periodic sync event.
258         *
259         * @param start Start tick for multi synchronisation
260         * @param repeat Frequency of multi synchronisation
261         *
262         */
263        void start(Tick start, Tick repeat);
264        /**
265         * Reschedule (if necessary) the periodic sync event.
266         *
267         * @param start Start tick for multi synchronisation
268         * @param repeat Frequency of multi synchronisation
269         *
270         * @note Useful if we have multiple MultiIface objects with
271         * different 'start' and 'repeat' values for global sync.
272         */
273        void adjust(Tick start, Tick repeat);
274        /**
275         * This is a global event so process() will be called by each
276         * simulation threads. (See further comments in the .cc file.)
277         */
278        void process() override;
279        /**
280         * Schedule periodic sync when resuming from a checkpoint.
281         */
282        void resume();
283
284        void serialize(const std::string &base, CheckpointOut &cp) const;
285        void unserialize(const std::string &base, CheckpointIn &cp);
286    };
287
288    /**
289     * The receive thread needs to store the packet pointer and the computed
290     * receive tick for each incoming data packet. This information is used
291     * by the simulation thread when it processes the corresponding receive
292     * event. (See more comments at the implemetation of the recvThreadFunc()
293     * and RecvPacketIn() methods.)
294     */
295    typedef std::pair<EthPacketPtr, Tick> RecvInfo;
296
297    /**
298     * Comparison predicate for RecvInfo, needed by the recvQueue.
299     */
300    struct RecvInfoCompare {
301        bool operator()(const RecvInfo &lhs, const RecvInfo &rhs)
302        {
303            return lhs.second > rhs.second;
304        }
305    };
306
307    /**
308     * Customized priority queue used to store incoming data packets info by
309     * the receiver thread. We need to expose the underlying container to
310     * enable iterator access for serializing.
311     */
312    class RecvQueue : public std::priority_queue<RecvInfo,
313                                                 std::vector<RecvInfo>,
314                                                 RecvInfoCompare>
315    {
316      public:
317        std::vector<RecvInfo> &impl() { return c; }
318        const std::vector<RecvInfo> &impl() const { return c; }
319    };
320
321    /*
322     * The priority queue to store RecvInfo items ordered by receive ticks.
323     */
324    RecvQueue recvQueue;
325    /**
326     * The singleton Sync object to perform multi synchronisation.
327     */
328    static Sync *sync;
329    /**
330     * The singleton SyncEvent object to schedule periodic multi sync.
331     */
332    static SyncEvent *syncEvent;
333    /**
334     * Tick to schedule the first multi sync event.
335     * This is just as optimization : we do not need any multi sync
336     * event until the simulated NIC is brought up by the OS.
337     */
338    Tick syncStart;
339    /**
340     * Frequency of multi sync events in ticks.
341     */
342    Tick syncRepeat;
343    /**
344     * Receiver thread pointer.
345     * Each MultiIface object must have exactly one receiver thread.
346     */
347    std::thread *recvThread;
348    /**
349     * The event manager associated with the MultiIface object.
350     */
351    EventManager *eventManager;
352
353    /**
354     * The receive done event for the simulated Ethernet link.
355     * It is scheduled by the receiver thread for each incoming data
356     * packet.
357     */
358    Event *recvDone;
359
360    /**
361     * The packet that belongs to the currently scheduled recvDone event.
362     */
363    EthPacketPtr scheduledRecvPacket;
364
365    /**
366     * The link delay in ticks for the simulated Ethernet link.
367     */
368    Tick linkDelay;
369
370    /**
371     * The rank of this process among the gem5 peers.
372     */
373    unsigned rank;
374    /**
375     * Total number of receiver threads (in this gem5 process).
376     * During the simulation it should be constant and equal to the
377     * number of MultiIface objects (i.e. simulated Ethernet
378     * links).
379     */
380    static unsigned recvThreadsNum;
381    /**
382     * The very first MultiIface object created becomes the master. We need
383     * a master to co-ordinate the global synchronisation.
384     */
385    static MultiIface *master;
386
387  protected:
388    /**
389     * Low level generic send routine.
390     * @param buf buffer that holds the data to send out
391     * @param length number of bytes to send
392     * @param dest_addr address of the target (simulated NIC). This may be
393     * used by a subclass for optimization (e.g. optimize broadcast)
394     */
395    virtual void sendRaw(void *buf,
396                         unsigned length,
397                         const MultiHeaderPkt::AddressType dest_addr) = 0;
398    /**
399     * Low level generic receive routine.
400     * @param buf the buffer to store the incoming message
401     * @param length buffer size (in bytes)
402     */
403    virtual bool recvRaw(void *buf, unsigned length) = 0;
404    /**
405     * Low level request for synchronisation among gem5 processes. Only one
406     * MultiIface object needs to call this (in each gem5 process) to trigger
407     * a multi sync.
408     *
409     * @param sync_req Sync request command.
410     * @param sync_tick The tick when sync is expected to happen in the sender.
411     */
412    virtual void syncRaw(MsgType sync_req, Tick sync_tick) = 0;
413    /**
414     * The function executed by a receiver thread.
415     */
416    void recvThreadFunc();
417    /**
418     * Receive a multi header packet. Called by the receiver thread.
419     * @param header the structure to store the incoming header packet.
420     * @return false if any error occured during the receive, true otherwise
421     *
422     * A header packet can carry a control command (e.g. 'barrier leave') or
423     * information about a data packet that is following the header packet
424     * back to back.
425     */
426    bool recvHeader(MultiHeaderPkt::Header &header);
427    /**
428     * Receive a data packet. Called by the receiver thread.
429     * @param data_header The packet descriptor for the expected incoming data
430     * packet.
431     */
432    void recvData(const MultiHeaderPkt::Header &data_header);
433
434  public:
435
436    /**
437     * ctor
438     * @param multi_rank Rank of this gem5 process within the multi run
439     * @param sync_start Start tick for multi synchronisation
440     * @param sync_repeat Frequency for multi synchronisation
441     * @param em The event manager associated with the simulated Ethernet link
442     */
443    MultiIface(unsigned multi_rank,
444               Tick sync_start,
445               Tick sync_repeat,
446               EventManager *em);
447
448    virtual ~MultiIface();
449    /**
450     * Send out an Ethernet packet.
451     * @param pkt The Ethernet packet to send.
452     * @param send_delay The delay in ticks for the send completion event.
453     */
454    void packetOut(EthPacketPtr pkt, Tick send_delay);
455    /**
456     * Fetch the next packet from the receive queue.
457     */
458    EthPacketPtr packetIn();
459
460    /**
461     * spawn the receiver thread.
462     * @param recv_done The receive done event associated with the simulated
463     * Ethernet link.
464     * @param link_delay The link delay for the simulated Ethernet link.
465     */
466    void spawnRecvThread(Event *recv_done,
467                         Tick link_delay);
468    /**
469     * Initialize the random number generator with a different seed in each
470     * peer gem5 process.
471     */
472    void initRandom();
473
474    DrainState drain() override;
475
476    /**
477     * Callback when draining is complete.
478     */
479    void drainDone();
480
481    /**
482     * Initialize the periodic synchronisation among peer gem5 processes.
483     */
484    void startPeriodicSync();
485
486    void serialize(const std::string &base, CheckpointOut &cp) const;
487    void unserialize(const std::string &base, CheckpointIn &cp);
488
489};
490
491
492#endif // __DEV_NET_MULTI_IFACE_HH__
493