fetch1.hh (10713:eddb533708cb) | fetch1.hh (11567:560d7fbbddd1) |
---|---|
1/* 2 * Copyright (c) 2013-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software --- 183 unchanged lines hidden --- 192 /** Input port carrying branch requests from Execute */ 193 Latch<BranchData>::Output inp; 194 /** Output port carrying read lines to Fetch2 */ 195 Latch<ForwardLineData>::Input out; 196 /** Input port carrying branch predictions from Fetch2 */ 197 Latch<BranchData>::Output prediction; 198 199 /** Interface to reserve space in the next stage */ | 1/* 2 * Copyright (c) 2013-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software --- 183 unchanged lines hidden --- 192 /** Input port carrying branch requests from Execute */ 193 Latch<BranchData>::Output inp; 194 /** Output port carrying read lines to Fetch2 */ 195 Latch<ForwardLineData>::Input out; 196 /** Input port carrying branch predictions from Fetch2 */ 197 Latch<BranchData>::Output prediction; 198 199 /** Interface to reserve space in the next stage */ |
200 Reservable &nextStageReserve; | 200 std::vector<InputBuffer<ForwardLineData>> &nextStageReserve; |
201 202 /** IcachePort to pass to the CPU. Fetch1 is the only module that uses 203 * it. */ 204 IcachePort icachePort; 205 206 /** Line snap size in bytes. All fetches clip to make their ends not 207 * extend beyond this limit. Setting this to the machine L1 cache line 208 * length will result in fetches never crossing line boundaries. */ --- 19 unchanged lines hidden --- 228 FetchWaitingForPC, /* Not fetching, waiting for stream change. 229 This doesn't stop issued fetches from being returned and 230 processed or for branches to change the state to Running. */ 231 FetchRunning /* Try to fetch, when possible */ 232 }; 233 234 /** Stage cycle-by-cycle state */ 235 | 201 202 /** IcachePort to pass to the CPU. Fetch1 is the only module that uses 203 * it. */ 204 IcachePort icachePort; 205 206 /** Line snap size in bytes. All fetches clip to make their ends not 207 * extend beyond this limit. Setting this to the machine L1 cache line 208 * length will result in fetches never crossing line boundaries. */ --- 19 unchanged lines hidden --- 228 FetchWaitingForPC, /* Not fetching, waiting for stream change. 229 This doesn't stop issued fetches from being returned and 230 processed or for branches to change the state to Running. */ 231 FetchRunning /* Try to fetch, when possible */ 232 }; 233 234 /** Stage cycle-by-cycle state */ 235 |
236 FetchState state; | 236 struct Fetch1ThreadInfo { |
237 | 237 |
238 /** Fetch PC value. This is updated by branches from Execute, branch 239 * prediction targets from Fetch2 and by incrementing it as we fetch 240 * lines subsequent to those two sources. */ 241 TheISA::PCState pc; | 238 /** Constructor to initialize all fields. */ 239 Fetch1ThreadInfo() : 240 state(FetchWaitingForPC), 241 pc(TheISA::PCState(0)), 242 streamSeqNum(InstId::firstStreamSeqNum), 243 predictionSeqNum(InstId::firstPredictionSeqNum), 244 blocked(false), 245 wakeupGuard(false) 246 { } |
242 | 247 |
243 /** Stream sequence number. This changes on request from Execute and is 244 * used to tag instructions by the fetch stream to which they belong. 245 * Execute originates new prediction sequence numbers. */ 246 InstSeqNum streamSeqNum; | 248 Fetch1ThreadInfo(const Fetch1ThreadInfo& other) : 249 state(other.state), 250 pc(other.pc), 251 streamSeqNum(other.streamSeqNum), 252 predictionSeqNum(other.predictionSeqNum), 253 blocked(other.blocked) 254 { } |
247 | 255 |
248 /** Prediction sequence number. This changes when requests from Execute 249 * or Fetch2 ask for a change of fetch address and is used to tag lines 250 * by the prediction to which they belong. Fetch2 originates 251 * prediction sequence numbers. */ 252 InstSeqNum predictionSeqNum; | 256 FetchState state; |
253 | 257 |
254 /** Blocked indication for report */ 255 bool blocked; | 258 /** Fetch PC value. This is updated by branches from Execute, branch 259 * prediction targets from Fetch2 and by incrementing it as we fetch 260 * lines subsequent to those two sources. */ 261 TheISA::PCState pc; |
256 | 262 |
263 /** Stream sequence number. This changes on request from Execute and is 264 * used to tag instructions by the fetch stream to which they belong. 265 * Execute originates new prediction sequence numbers. */ 266 InstSeqNum streamSeqNum; 267 268 /** Prediction sequence number. This changes when requests from Execute 269 * or Fetch2 ask for a change of fetch address and is used to tag lines 270 * by the prediction to which they belong. Fetch2 originates 271 * prediction sequence numbers. */ 272 InstSeqNum predictionSeqNum; 273 274 /** Blocked indication for report */ 275 bool blocked; 276 277 /** Signal to guard against sleeping first cycle of wakeup */ 278 bool wakeupGuard; 279 }; 280 281 std::vector<Fetch1ThreadInfo> fetchInfo; 282 ThreadID threadPriority; 283 |
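
The main change in the hunk above is structural: the stage's single set of fetch-state members in the old column (state, pc, streamSeqNum, predictionSeqNum, blocked) is folded into a per-thread `Fetch1ThreadInfo` struct, one instance per hardware thread kept in the `fetchInfo` vector, with `threadPriority` tracking which thread to fetch from next and a new `wakeupGuard` flag to stop a thread sleeping on the first cycle after wakeup. A minimal, self-contained sketch of that shape follows; the scalar stand-ins (a plain integer PC, fixed initial sequence numbers) are illustrative assumptions, not gem5's `TheISA::PCState` or `InstId` definitions.

```cpp
// Sketch only: mirrors the shape of the new Fetch1ThreadInfo/fetchInfo
// members, with stand-in types instead of gem5's PCState/InstSeqNum/InstId.
#include <cstdint>
#include <vector>

using ThreadID = int16_t;
using InstSeqNum = uint64_t;

enum FetchState
{
    FetchHalted,        /* not fetching at all */
    FetchWaitingForPC,  /* not fetching, waiting for a stream change */
    FetchRunning        /* try to fetch, when possible */
};

struct Fetch1ThreadInfo
{
    FetchState state = FetchWaitingForPC;
    uint64_t pc = 0;                 /* stand-in for TheISA::PCState */
    InstSeqNum streamSeqNum = 1;     /* stand-in for InstId::firstStreamSeqNum */
    InstSeqNum predictionSeqNum = 1; /* stand-in for InstId::firstPredictionSeqNum */
    bool blocked = false;            /* blocked indication for report */
    bool wakeupGuard = false;        /* don't sleep on the first cycle of wakeup */
};

class Fetch1Sketch
{
  public:
    explicit Fetch1Sketch(unsigned num_threads)
        : fetchInfo(num_threads), threadPriority(0)
    { }

    /* Per-thread fetch state is now reached through the thread ID rather
     * than living directly in the stage object. */
    Fetch1ThreadInfo &thread(ThreadID tid) { return fetchInfo[tid]; }

  private:
    std::vector<Fetch1ThreadInfo> fetchInfo;
    ThreadID threadPriority;
};
```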
|
257 /** State of memory access for head instruction fetch */ 258 enum IcacheState 259 { 260 IcacheRunning, /* Default. Step icache queues when possible */ 261 IcacheNeedsRetry /* Request rejected, will be asked to retry */ 262 }; 263 264 typedef Queue<FetchRequestPtr, --- 37 unchanged lines hidden --- 302 303 /** Convert a response to a ForwardLineData */ 304 void processResponse(FetchRequestPtr response, 305 ForwardLineData &line); 306 307 friend std::ostream &operator <<(std::ostream &os, 308 IcacheState state); 309 | 284 /** State of memory access for head instruction fetch */ 285 enum IcacheState 286 { 287 IcacheRunning, /* Default. Step icache queues when possible */ 288 IcacheNeedsRetry /* Request rejected, will be asked to retry */ 289 }; 290 291 typedef Queue<FetchRequestPtr, --- 37 unchanged lines hidden --- 329 330 /** Convert a response to a ForwardLineData */ 331 void processResponse(FetchRequestPtr response, 332 ForwardLineData &line); 333 334 friend std::ostream &operator <<(std::ostream &os, 335 IcacheState state); 336 |
337 338 /** Use the current threading policy to determine the next thread to 339 * fetch from. */ 340 ThreadID getScheduledThread(); 341 |
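
`getScheduledThread()` is the hook that lets the rest of the stage stay thread-agnostic: each cycle it consults the current threading policy to decide which hardware thread fetches next. The fragment below is one plausible round-robin version of that decision, written against stand-in types; the real policy selection (and what counts as runnable or blocked) is configured on the CPU, so treat this purely as an illustration.

```cpp
// Illustrative round-robin thread pick; ThreadFetchStatus is a stand-in for
// the per-thread state the real stage keeps in fetchInfo.
#include <cstdint>
#include <vector>

using ThreadID = int16_t;
const ThreadID InvalidThreadID = -1;

struct ThreadFetchStatus
{
    bool runnable; /* corresponds to state == FetchRunning */
    bool blocked;  /* next stage has no space for this thread */
};

ThreadID
pickFetchThread(const std::vector<ThreadFetchStatus> &threads,
                ThreadID &priority)
{
    const ThreadID num_threads = static_cast<ThreadID>(threads.size());

    /* Start at the thread after the last one chosen and wrap around, so
     * every runnable, unblocked thread eventually gets a fetch slot. */
    for (ThreadID i = 0; i < num_threads; i++) {
        ThreadID tid = static_cast<ThreadID>((priority + i) % num_threads);

        if (threads[tid].runnable && !threads[tid].blocked) {
            priority = static_cast<ThreadID>((tid + 1) % num_threads);
            return tid;
        }
    }

    return InvalidThreadID; /* no thread can fetch this cycle */
}
```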
|
310 /** Insert a line fetch into the requests. This can be a partial 311 * line request where the given address has a non-0 offset into a 312 * line. */ | 342 /** Insert a line fetch into the requests. This can be a partial 343 * line request where the given address has a non-0 offset into a 344 * line. */ |
313 void fetchLine(); | 345 void fetchLine(ThreadID tid); |
314 315 /** Try and issue a fetch for a translated request at the 316 * head of the requests queue. Also tries to move the request 317 * between queues */ 318 void tryToSendToTransfers(FetchRequestPtr request); 319 320 /** Try to send (or resend) a memory request's next/only packet to 321 * the memory system. Returns true if the fetch was successfully --- 27 unchanged lines hidden --- 349 350 public: 351 Fetch1(const std::string &name_, 352 MinorCPU &cpu_, 353 MinorCPUParams &params, 354 Latch<BranchData>::Output inp_, 355 Latch<ForwardLineData>::Input out_, 356 Latch<BranchData>::Output prediction_, | 346 347 /** Try and issue a fetch for a translated request at the 348 * head of the requests queue. Also tries to move the request 349 * between queues */ 350 void tryToSendToTransfers(FetchRequestPtr request); 351 352 /** Try to send (or resend) a memory request's next/only packet to 353 * the memory system. Returns true if the fetch was successfully --- 27 unchanged lines hidden --- 381 382 public: 383 Fetch1(const std::string &name_, 384 MinorCPU &cpu_, 385 MinorCPUParams &params, 386 Latch<BranchData>::Output inp_, 387 Latch<ForwardLineData>::Input out_, 388 Latch<BranchData>::Output prediction_, |
357 Reservable &next_stage_input_buffer); | 389 std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer); |
358 359 public: 360 /** Returns the IcachePort owned by this Fetch1 */ 361 MinorCPU::MinorCPUPort &getIcachePort() { return icachePort; } 362 363 /** Pass on input/buffer data to the output if you can */ 364 void evaluate(); 365 | 390 391 public: 392 /** Returns the IcachePort owned by this Fetch1 */ 393 MinorCPU::MinorCPUPort &getIcachePort() { return icachePort; } 394 395 /** Pass on input/buffer data to the output if you can */ 396 void evaluate(); 397 |
398 /** Initiate fetch1 fetching */ 399 void wakeupFetch(ThreadID tid); 400 |
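
`fetchLine()` now takes the thread ID it fetches for, and its comment (together with the `lineSnap` description earlier in the header) implies a specific piece of address arithmetic: a fetch that starts part-way into a line is clipped so it ends on a `lineSnap` boundary, and only the later, sequential fetches are full lines. A small worked sketch of that clipping is below; the surrounding request-building machinery is left out, and the helper name is an assumption for illustration.

```cpp
// Line-snap clipping sketch: how large a fetch starting at fetch_addr can
// be without crossing a line_snap boundary. line_snap must be a power of two.
#include <cassert>
#include <cstdint>
#include <iostream>

using Addr = uint64_t;

unsigned
clippedFetchSize(Addr fetch_addr, unsigned line_snap)
{
    assert(line_snap != 0 && (line_snap & (line_snap - 1)) == 0);
    return line_snap - static_cast<unsigned>(fetch_addr & (line_snap - 1));
}

int
main()
{
    /* With a 64-byte line snap, a branch target of 0x1010 yields a partial,
     * 48-byte request ending at the 0x1040 boundary; the next sequential
     * fetch at 0x1040 is then a full 64-byte line. */
    std::cout << clippedFetchSize(0x1010, 64) << '\n'; // prints 48
    std::cout << clippedFetchSize(0x1040, 64) << '\n'; // prints 64
    return 0;
}
```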
|
366 void minorTrace() const; 367 368 /** Is this stage drained? For Fetch1, draining is initiated by 369 * Execute signalling a branch with the reason HaltFetch */ 370 bool isDrained(); 371}; 372 373} 374 375#endif /* __CPU_MINOR_FETCH1_HH__ */ | 401 void minorTrace() const; 402 403 /** Is this stage drained? For Fetch1, draining is initiated by 404 * Execute signalling a branch with the reason HaltFetch */ 405 bool isDrained(); 406}; 407 408} 409 410#endif /* __CPU_MINOR_FETCH1_HH__ */ |
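
The constructor change mirrors the `nextStageReserve` change near the top of the diff: instead of a single `Reservable` covering Fetch2's one input buffer, Fetch1 is now handed one `InputBuffer<ForwardLineData>` per thread, so space has to be reserved in the chosen thread's buffer before a line can be pushed out. The sketch below shows one way such a reserve-before-send handshake can look with a simplified buffer type; the class, method names and internals are assumptions, not gem5's exact Reservable/InputBuffer API.

```cpp
// Simplified reservation handshake: one bounded buffer per thread, with a
// slot reserved in that thread's buffer before a fetched line is sent on.
#include <cstddef>
#include <deque>
#include <vector>

struct ForwardLineData { /* fetched line payload, elided */ };

class LineBuffer
{
  public:
    explicit LineBuffer(std::size_t capacity) : capacity(capacity) {}

    bool canReserve() const { return lines.size() + reserved < capacity; }
    void reserve() { reserved++; }

    /* Pushing a line consumes one previously made reservation. */
    void push(const ForwardLineData &line)
    {
        if (reserved > 0)
            reserved--;
        lines.push_back(line);
    }

  private:
    std::size_t capacity;
    std::size_t reserved = 0;
    std::deque<ForwardLineData> lines;
};

/* Fetch1's side of the handshake: a thread may only issue a new line fetch
 * this cycle if its own next-stage buffer still has room to reserve. */
bool
tryReserveForThread(std::vector<LineBuffer> &next_stage, int tid)
{
    if (!next_stage[tid].canReserve())
        return false; /* this thread counts as blocked for the cycle */

    next_stage[tid].reserve();
    return true;
}
```

A failed reservation here corresponds to the per-thread `blocked` flag reported by `Fetch1ThreadInfo` above.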