1/* 2 * Copyright (c) 2004-2006 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 63 unchanged lines hidden (view full) --- 72 73 /** Initializes the LSQ unit with the specified number of entries. */ 74 void init(Params *params, unsigned maxLQEntries, 75 unsigned maxSQEntries, unsigned id); 76 77 /** Returns the name of the LSQ unit. */ 78 std::string name() const; 79 |
80 /** Registers statistics. */ 81 void regStats(); 82 |
83 /** Sets the CPU pointer. */ 84 void setCPU(FullCPU *cpu_ptr); 85 86 /** Sets the IEW stage pointer. */ 87 void setIEW(IEW *iew_ptr) 88 { iewStage = iew_ptr; } 89 90 /** Switches out LSQ unit. */ --- 34 unchanged lines hidden (view full) --- 125 /** Commits stores older than a specific sequence number. */ 126 void commitStores(InstSeqNum &youngest_inst); 127 128 /** Writes back stores. */ 129 void writebackStores(); 130 131 void completeDataAccess(PacketPtr pkt); 132 |
133 /** Clears all the entries in the LQ. */ 134 void clearLQ(); 135 136 /** Clears all the entries in the SQ. */ 137 void clearSQ(); 138 139 /** Resizes the LQ to a given size. */ 140 void resizeLQ(unsigned size); --- 297 unchanged lines hidden (view full) --- 438 InstSeqNum blockedLoadSeqNum; 439 440 /** The oldest load that caused a memory ordering violation. */ 441 DynInstPtr memDepViolator; 442 443 // Will also need how many read/write ports the Dcache has. Or keep track 444 // of that in stage that is one level up, and only call executeLoad/Store 445 // the appropriate number of times. |
446 |
447 /** Total number of loads forwarded from LSQ stores. */ 448 Stats::Scalar<> lsqForwLoads; |
449 |
450 /** Total number of loads ignored due to invalid addresses. */ 451 Stats::Scalar<> invAddrLoads; |
452 |
453 /** Total number of squashed loads. */ 454 Stats::Scalar<> lsqSquashedLoads; |
455 |
456 /** Total number of responses from the memory system that are 457 * ignored due to the instruction already being squashed. */ 458 Stats::Scalar<> lsqIgnoredResponses; |
459 |
460 /** Total number of squashed stores. */ 461 Stats::Scalar<> lsqSquashedStores; 462 463 /** Total number of software prefetches ignored due to invalid addresses. */ 464 Stats::Scalar<> invAddrSwpfs; 465 466 /** Ready loads blocked due to partial store-forwarding. */ 467 Stats::Scalar<> lsqBlockedLoads; 468 469 /** Number of loads that were rescheduled. */ 470 Stats::Scalar<> lsqRescheduledLoads; 471 472 /** Number of times the LSQ is blocked due to the cache. */ 473 Stats::Scalar<> lsqCacheBlocked; 474 |
475 public: 476 /** Executes the load at the given index. */ 477 template <class T> 478 Fault read(Request *req, T &data, int load_idx); 479 480 /** Executes the store at the given index. */ 481 template <class T> 482 Fault write(Request *req, T &data, int store_idx); --- 41 unchanged lines hidden (view full) --- 524 525 // Make sure this isn't an uncacheable access 526 // A bit of a hackish way to get uncached accesses to work only if they're 527 // at the head of the LSQ and are ready to commit (at the head of the ROB 528 // too). 529 if (req->getFlags() & UNCACHEABLE && 530 (load_idx != loadHead || !load_inst->reachedCommit)) { 531 iewStage->rescheduleMemInst(load_inst); |
532 ++lsqRescheduledLoads; |
533 return TheISA::genMachineCheckFault(); 534 } 535 536 // Check the SQ for any previous stores that might lead to forwarding 537 int store_idx = load_inst->sqIdx; 538 539 int store_size = 0; 540 --- 63 unchanged lines hidden (view full) --- 604 605 WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this); 606 607 // We'll say this has a 1 cycle load-store forwarding latency 608 // for now. 609 // @todo: Need to make this a parameter. 610 wb->schedule(curTick); 611 |
612 ++lsqForwLoads; |
613 return NoFault; 614 } else if ((store_has_lower_limit && lower_load_has_store_part) || 615 (store_has_upper_limit && upper_load_has_store_part) || 616 (lower_load_has_store_part && upper_load_has_store_part)) { 617 // This is the partial store-load forwarding case where a store 618 // has only part of the load's data. 619 620 // If it's already been written back, then don't worry about --- 11 unchanged lines hidden (view full) --- 632 stalled = true; 633 stallingStoreIsn = storeQueue[store_idx].inst->seqNum; 634 stallingLoadIdx = load_idx; 635 } 636 637 // Tell IQ/mem dep unit that this instruction will need to be 638 // rescheduled eventually 639 iewStage->rescheduleMemInst(load_inst); |
640 ++lsqRescheduledLoads; |
641 642 // Do not generate a writeback event as this instruction is not 643 // complete. 644 DPRINTF(LSQUnit, "Load-store forwarding mis-match. " 645 "Store idx %i to load addr %#x\n", 646 store_idx, req->getVaddr()); 647 |
648 ++lsqBlockedLoads; |
649 return NoFault; 650 } 651 } 652 653 // If there's no forwarding case, then go access memory 654 DPRINTF(LSQUnit, "Doing functional access for inst [sn:%lli] PC %#x\n", 655 load_inst->seqNum, load_inst->readPC()); 656 --- 11 unchanged lines hidden (view full) --- 668 LSQSenderState *state = new LSQSenderState; 669 state->isLoad = true; 670 state->idx = load_idx; 671 state->inst = load_inst; 672 data_pkt->senderState = state; 673 674 // if we have a cache, do cache access too 675 if (!dcachePort->sendTiming(data_pkt)) { |
676 ++lsqCacheBlocked; |
677 // There's an older load that's already going to squash. 678 if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum) 679 return NoFault; 680 681 // Record that the load was blocked due to memory. This 682 // load will squash all instructions after it, be 683 // refetched, and re-executed. 684 isLoadBlocked = true; --- 41 unchanged lines hidden --- |