1/* 2 * Copyright (c) 2012-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software --- 41 unchanged lines hidden (view full) --- 50 51using namespace std; 52 53TrafficGen::TrafficGen(const TrafficGenParams* p) 54 : MemObject(p), 55 system(p->system), 56 masterID(system->getMasterId(name())), 57 configFile(p->config_file), |
// elasticReq: configuration flag forwarded to every
// states[currState]->nextPacketTick(elasticReq, delay) call in this file.
// NOTE(review): this is a collapsed diff view; the "--- N unchanged lines
// hidden ---" markers elide source, so only visible behavior is documented.
58 elasticReq(p->elastic_req), |
59 nextTransitionTick(0), 60 nextPacketTick(0), 61 port(name() + ".port", *this), 62 retryPkt(NULL), 63 retryPktTick(0), 64 updateEvent(this), 65 drainManager(NULL) 66{ --- 36 unchanged lines hidden (view full) --- 103} 104 105void 106TrafficGen::initState() 107{ 108 // when not restoring from a checkpoint, make sure we kick things off 109 if (system->isTimingMode()) { 110 // call nextPacketTick on the state to advance it |
// no send has happened yet, so the back-pressure delay argument is 0 here
111 nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0); |
112 schedule(updateEvent, std::min(nextPacketTick, nextTransitionTick)); 113 } else { 114 DPRINTF(TrafficGen, 115 "Traffic generator is only active in timing mode\n"); 116 } 117} 118 119unsigned int --- 41 unchanged lines hidden (view full) --- 161 } 162 163 UNSERIALIZE_SCALAR(nextTransitionTick); 164 165 UNSERIALIZE_SCALAR(nextPacketTick); 166 167 // @todo In the case of a stateful generator state such as the 168 // trace player we would also have to restore the position in the |
169 // trace playback and the tick offset |
170 UNSERIALIZE_SCALAR(currState); 171} 172 173void 174TrafficGen::update() 175{ 176 // if we have reached the time for the next state transition, then 177 // perform the transition --- 11 unchanged lines hidden (view full) --- 189 } 190 191 // if we are waiting for a retry, do not schedule any further 192 // events, in the case of a transition or a successful send, go 193 // ahead and determine when the next update should take place 194 if (retryPkt == NULL) { 195 // schedule next update event based on either the next execute 196 // tick or the next transition, which ever comes first |
// nothing stalled on this path, so the delay argument is 0 (contrast
// with recvReqRetry, which passes the measured back-pressure delay)
197 nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0); |
198 Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick); 199 DPRINTF(TrafficGen, "Next event scheduled at %lld\n", nextEventTick); 200 schedule(updateEvent, nextEventTick); 201 } 202} 203 204void 205TrafficGen::parseConfig() --- 176 unchanged lines hidden (view full) --- 382 383 DPRINTF(TrafficGen, "Received retry\n"); 384 numRetries++; 385 // attempt to send the packet, and if we are successful start up 386 // the machinery again 387 if (port.sendTimingReq(retryPkt)) { 388 retryPkt = NULL; 389 // remember how much delay was incurred due to back-pressure |
390 // when sending the request, we also use this to derive 391 // the tick for the next packet |
392 Tick delay = curTick() - retryPktTick; 393 retryPktTick = 0; 394 retryTicks += delay; 395 396 if (drainManager == NULL) { 397 // packet is sent, so find out when the next one is due |
// pass the observed back-pressure delay; NOTE(review): how an elastic
// generator uses it lives in nextPacketTick(), which is not visible here
398 nextPacketTick = states[currState]->nextPacketTick(elasticReq, 399 delay); |
// clamp with curTick() so the update event is never scheduled in the past
400 Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick); 401 schedule(updateEvent, std::max(curTick(), nextEventTick)); 402 } else { 403 // shut things down 404 nextPacketTick = MaxTick; 405 nextTransitionTick = MaxTick; 406 drainManager->signalDrainDone(); 407 // Clear the drain event once we're done with it. --- 32 unchanged lines hidden --- |