Sequencer.cc: diff between revisions 6355:79464d8a4d2f and 6372:f1a41ea3bbab
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright

--- 476 unchanged lines hidden ---

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

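// A minimal sketch (not part of this file) of the kind of setState hook the
// comment above describes: a protocol controller that re-checks the global
// invariant only when a permission upgrade occurs. The controller class and
// its getState()/permissionUpgrade()/m_state_map/m_sequencer_ptr names are
// hypothetical and used for illustration only; just checkCoherence() and
// line_address() come from the Ruby code itself.
//
// void HypotheticalL1Controller::setState(const Address& addr, State new_state)
// {
//   State old_state = getState(addr);
//   m_state_map[line_address(addr)] = new_state;
//
//   // Upgrades (e.g. Shared -> Modified) are where a coherence violation
//   // could first become visible, so re-check the invariant here.
//   if (permissionUpgrade(old_state, new_state)) {
//     m_sequencer_ptr->checkCoherence(addr);
//   }
// }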
/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
  bool found = false;
  const Address lineAddr = line_address(addr);
  DataBlock data;
  PhysAddress paddr(addr);
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr); // needed by the cache lookups below; mirrors setRubyMemoryValue()

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
//  } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
//    ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
//    L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
//
//    int offset = addr.getOffset();
//    for(int i=0; i<size_in_bytes; ++i){
//      value[i] = tbeEntry.getDataBlk().getByte(offset + i);
//    }
//
//    found = true;
  } else {
    // Address not found in any cache; read the line's bytes from the directory instead.
    //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
    }
    //WARN_MSG("Couldn't find address");
    //WARN_EXPR(addr);
    found = false;
  }
  return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // idea here is that the coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found in any cache; write the bytes into the directory's copy.
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
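// The two commented-out debugger helpers above share one lookup order: probe
// the caches from closest to farthest (L1 instruction, L1 data, then the L2
// bank the address maps to) and fall back to the directory's copy when no
// cache holds the line. A compressed sketch of that walk, where tryGetBlock(),
// directoryEntryFor(), and the m_L1I_cache/m_L1D_cache/m_L2_bank members are
// hypothetical stand-ins for the tryCacheAccess()/lookup() calls used above:
//
// DataBlock* Sequencer::findClosestCopy(const Address& addr)
// {
//   const Address line = line_address(addr);
//   if (DataBlock* blk = tryGetBlock(m_L1I_cache, line)) return blk;  // L1 instruction cache
//   if (DataBlock* blk = tryGetBlock(m_L1D_cache, line)) return blk;  // L1 data cache
//   if (DataBlock* blk = tryGetBlock(m_L2_bank, line))   return blk;  // mapped L2 bank (CMP only)
//   return &directoryEntryFor(line).getDataBlk();                     // directory/memory copy
// }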
/*

void
Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
{
  if ( type == AccessType_Read || type == AccessType_Write ) {
    // need to break up the access into data-block-sized chunks
    uint64 guest_ptr = paddr;
    Vector<DataBlock*> datablocks;
    while (paddr + len != guest_ptr) {
      Address addr(guest_ptr);
      Address line_addr = line_address(addr);

      int bytes_copied;
      if (addr.getOffset() == 0) {
        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
          (paddr + len - guest_ptr):
          RubyConfig::dataBlockBytes();
      } else {
        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
        if (guest_ptr + bytes_copied > paddr + len)
          bytes_copied = paddr + len - guest_ptr;
      }

      // first we need to find all data blocks that have to be updated for a write
      // and the highest block for a read
      for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
        if (Protocol::m_TwoLevelCache){
          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        } else {
          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        }
      }
      if (Protocol::m_TwoLevelCache){
        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_addr)) {
          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(line_addr).getDataBlk());
        }
      }
      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
      Directory_Entry& entry = dir->lookup(line_addr);
      datablocks.insertAtBottom(&entry.getDataBlk());

      if (type == AccessType_Read){
        datablocks[0]->copyData(data, addr.getOffset(), bytes_copied);
      } else { // type == AccessType_Write
        for (int i=0;i<datablocks.size();i++)
          datablocks[i]->setData(data, addr.getOffset(), bytes_copied);
      }

      guest_ptr += bytes_copied;
      data += bytes_copied;
      datablocks.clear();
    }
  }
}

*/
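// The chunking loop in rubyMemAccess() above splits an arbitrary
// [paddr, paddr + len) access into pieces that never cross a data-block
// boundary: an aligned pointer copies up to a full block, an unaligned
// pointer copies only to the end of its current block, and the last piece is
// clipped to the end of the request. A standalone restatement of that
// arithmetic (chunk_bytes() is an illustrative helper, not part of the
// Sequencer; block_bytes plays the role of RubyConfig::dataBlockBytes()):
//
// static int chunk_bytes(uint64 guest_ptr, uint64 paddr, int len, int block_bytes)
// {
//   int offset = (int)(guest_ptr % block_bytes);      // position within the current block
//   int to_block_end = block_bytes - offset;          // bytes left in this block
//   int to_req_end = (int)(paddr + len - guest_ptr);  // bytes left in the request
//   return (to_block_end < to_req_end) ? to_block_end : to_req_end;
// }
//
// With 64-byte blocks, a 100-byte access that starts at offset 40 within a
// block is therefore copied as three chunks of 24, 64, and 12 bytes.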