Sequencer.cc: changes between revisions 6162:cbd6debc4fd0 and 6165:2d26c346f1be
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met: redistributions of source code must retain the above copyright

--- 32 unchanged lines hidden ---

 #include "mem/ruby/config/RubyConfig.hh"
 //#include "mem/ruby/recorder/Tracer.hh"
 #include "mem/ruby/slicc_interface/AbstractChip.hh"
 #include "mem/protocol/Chip.hh"
 #include "mem/ruby/tester/Tester.hh"
 #include "mem/ruby/common/SubBlock.hh"
 #include "mem/protocol/Protocol.hh"
 #include "mem/gems_common/Map.hh"
+#include "mem/packet.hh"

 Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
   m_chip_ptr = chip_ptr;
   m_version = version;

   m_deadlock_check_scheduled = false;
   m_outstanding_count = 0;

   int smt_threads = RubyConfig::numberofSMTThreads();
   m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
   m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];

+  m_packetTable_ptr = new Map<Address, Packet*>;
+
   for(int p=0; p < smt_threads; ++p){
     m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
     m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
   }

 }

 Sequencer::~Sequencer() {

--- 529 unchanged lines hidden ---

   }

   bool write =
     (type == CacheRequestType_ST) ||
     (type == CacheRequestType_ATOMIC);

   if (TSO && write) {
-    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
+                                                               m_packetTable_ptr->lookup(request.getAddress()));
   } else {

     // Copy the correct bytes out of the cache line into the subblock
     SubBlock subblock(request_address, request_logical_address, size);
     subblock.mergeFrom(data);  // copy the correct bytes from DataBlock in the SubBlock

     // Scan the store buffer to see if there are any outstanding stores we need to collect
     if (TSO) {
       m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
     }

     // Call into the Driver and let it read and/or modify the sub-block
-    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+    Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
+
+    // update data if this is a store/atomic
+
+    /*
+    if (pkt->req->isCondSwap()) {
+      L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
+      DataBlk datablk = entry->getDataBlk();
+      uint8_t *orig_data = datablk.getArray();
+      if ( datablk.equal(pkt->req->getExtraData()) )
+        datablk->setArray(pkt->getData());
+      pkt->setData(orig_data);
+    }
+    */
+
+    g_system_ptr->getDriver()->hitCallback(pkt);
+    m_packetTable_ptr->remove(request.getAddress());

     // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
     // (This is only triggered for the non-TSO case)
     if (write) {
       assert(!TSO);
       subblock.mergeTo(data);  // copy the correct bytes from SubBlock into the DataBlock
     }
   }
 }

 void Sequencer::printDebug(){
   //notify driver of debug
   g_system_ptr->getDriver()->printDebug();
 }

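The hit-callback change above replaces the processor-id/sub-block callback with a Packet-based one: the sequencer records the originating gem5 Packet in m_packetTable_ptr when a request is issued, and on completion hitCallback() looks the packet up by address, hands it to the driver, and removes the entry. Below is a minimal, self-contained sketch of that issue/complete bookkeeping pattern; PendingPacketTable, FakePacket, recordIssue(), and complete() are hypothetical stand-ins for m_packetTable_ptr and Packet, not the Ruby or gem5 API.

// Minimal sketch of the address-to-packet bookkeeping pattern (assumptions,
// not Sequencer.cc itself): PendingPacketTable and FakePacket are hypothetical.
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>

struct FakePacket {            // stand-in for the gem5 Packet bound to a request
    uint64_t paddr;
    bool is_write;
};

class PendingPacketTable {
  public:
    // Issue side (makeRequest analogue): remember which packet is waiting
    // on this physical address.
    void recordIssue(uint64_t paddr, FakePacket* pkt) {
        m_table[paddr] = pkt;
    }

    // Completion side (hitCallback analogue): look the packet up, hand it to
    // the driver's callback, then drop the bookkeeping entry.
    void complete(uint64_t paddr,
                  const std::function<void(FakePacket*)>& hit_callback) {
        auto it = m_table.find(paddr);
        assert(it != m_table.end());  // a completion must match an issued request
        hit_callback(it->second);
        m_table.erase(it);
    }

  private:
    std::unordered_map<uint64_t, FakePacket*> m_table;
};

int main() {
    PendingPacketTable table;
    FakePacket pkt{0x1040, false};

    table.recordIssue(pkt.paddr, &pkt);            // issue
    table.complete(pkt.paddr, [](FakePacket* p) {  // completion
        std::cout << "hit callback for packet at 0x"
                  << std::hex << p->paddr << std::endl;
    });
    return 0;
}

The assert makes explicit the assumption that every completion corresponds to a previously recorded request; the real code relies on Map::lookup() for the same pairing.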
+//dsm: breaks build, delayed
 // Returns true if the sequencer already has a load or store outstanding
 bool
 Sequencer::isReady(const Packet* pkt) const
 {

   int cpu_number = pkt->req->contextId();
   la_t logical_addr = pkt->req->getVaddr();
   pa_t physical_addr = pkt->req->getPaddr();

--- 17 unchanged lines hidden ---

                    Address(0),
                    AccessModeType_UserMode,   // User/supervisor mode
                    0,                         // Size in bytes of request
                    PrefetchBit_No,            // Not a prefetch
                    0,                         // Version number
                    Address(logical_addr),     // Virtual Address
                    thread                     // SMT thread
                    );
-  isReady(request);
+  return isReady(request);
 }

 bool
 Sequencer::isReady(const CacheMsg& request) const
 {
   if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
     //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
     //printProgress(cout);

--- 19 unchanged lines hidden ---

   }

   if (TSO) {
     return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
   }
   return true;
 }

-// Called by Driver
+//dsm: breaks build, delayed
+// Called by Driver (Simics or Tester).
 void
-Sequencer::makeRequest(const Packet* pkt, void* data)
+Sequencer::makeRequest(Packet* pkt)
 {
   int cpu_number = pkt->req->contextId();
   la_t logical_addr = pkt->req->getVaddr();
   pa_t physical_addr = pkt->req->getPaddr();
   int request_size = pkt->getSize();
   CacheRequestType type_of_request;
+  PrefetchBit prefetch;
+  bool write = false;
   if ( pkt->req->isInstFetch() ) {
     type_of_request = CacheRequestType_IFETCH;
   } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
     type_of_request = CacheRequestType_ATOMIC;
+    write = true;
   } else if ( pkt->isRead() ) {
     type_of_request = CacheRequestType_LD;
   } else if ( pkt->isWrite() ) {
     type_of_request = CacheRequestType_ST;
+    write = true;
   } else {
     assert(false);
   }
+  if (pkt->req->isPrefetch()) {
+    prefetch = PrefetchBit_Yes;
+  } else {
+    prefetch = PrefetchBit_No;
+  }
   la_t virtual_pc = pkt->req->getPC();
   int isPriv = false;  // TODO: get permission data
   int thread = pkt->req->threadId();

   AccessModeType access_mode = AccessModeType_UserMode;  // TODO: get actual permission

   CacheMsg request(Address( physical_addr ),
                    Address( physical_addr ),
                    type_of_request,
                    Address(virtual_pc),
                    access_mode,               // User/supervisor mode
                    request_size,              // Size in bytes of request
-                   PrefetchBit_No,            // Not a prefetch
+                   prefetch,
                    0,                         // Version number
                    Address(logical_addr),     // Virtual Address
                    thread                     // SMT thread
                    );
-  makeRequest(request);
-}
-
-void
-Sequencer::makeRequest(const CacheMsg& request)
-{
-  bool write = (request.getType() == CacheRequestType_ST) ||
-    (request.getType() == CacheRequestType_ATOMIC);
-
-  if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
+
+  if ( TSO && write && !pkt->req->isPrefetch() ) {
     assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
-    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
     return;
   }

-  bool hit = doRequest(request);
+  m_packetTable_ptr->insert(Address( physical_addr ), pkt);
+
+  doRequest(request);
 }

 bool Sequencer::doRequest(const CacheMsg& request) {
   bool hit = false;
   // Check the fast path
   DataBlock* data_ptr;

   int thread = request.getThreadID();

--- 171 unchanged lines hidden ---
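The rewritten makeRequest() folds the old CacheMsg overload into the Packet path: it classifies the incoming packet, tracks whether the access is store-like and whether it is a prefetch, and then either hands TSO store-like, non-prefetch requests to the store buffer together with their packet (insertStore(pkt, request)) or records the packet in m_packetTable_ptr and issues the request. A minimal sketch of that classification step follows; ReqInfo, RubyRequestType, and classify() are hypothetical stand-ins for the Packet/Request flag checks, not the gem5 API.

// Minimal sketch (assumptions, not the gem5 API) of the request classification
// performed by the new makeRequest().
#include <cassert>
#include <iostream>

enum class RubyRequestType { IFETCH, ATOMIC, LD, ST };

struct ReqInfo {                 // stand-in for the pkt->req / pkt flag queries
    bool inst_fetch = false;
    bool locked_or_swap = false;
    bool read = false;
    bool write = false;
    bool prefetch = false;
};

struct Classified {
    RubyRequestType type;
    bool is_write;               // drives the TSO store-buffer path
    bool is_prefetch;            // carried into the request's prefetch bit
};

// Mirrors the if/else chain above: instruction fetches first, then locked or
// swap accesses as atomics, then plain loads and stores.
Classified classify(const ReqInfo& r) {
    Classified c{RubyRequestType::LD, false, r.prefetch};
    if (r.inst_fetch) {
        c.type = RubyRequestType::IFETCH;
    } else if (r.locked_or_swap) {
        c.type = RubyRequestType::ATOMIC;
        c.is_write = true;
    } else if (r.read) {
        c.type = RubyRequestType::LD;
    } else if (r.write) {
        c.type = RubyRequestType::ST;
        c.is_write = true;
    } else {
        assert(false && "unrecognized request");
    }
    return c;
}

int main() {
    ReqInfo store;  store.write = true;
    ReqInfo swap;   swap.locked_or_swap = true;
    std::cout << std::boolalpha
              << "store -> write? " << classify(store).is_write << "\n"
              << "swap  -> write? " << classify(swap).is_write << "\n";
    return 0;
}

Because write and prefetch are computed before the CacheMsg is built, the TSO check (TSO && write && !pkt->req->isPrefetch()) can divert those requests to the store buffer before anything is added to the packet table.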