// base.cc revision 4458
1/* 2 * Copyright (c) 2003-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Erik Hallnor 29 */ 30 31/** 32 * @file 33 * Definition of BaseCache functions. 
34 */ 35 36#include "cpu/base.hh" 37#include "cpu/smt.hh" 38#include "mem/cache/base_cache.hh" 39#include "mem/cache/miss/mshr.hh" 40 41using namespace std; 42 43BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache) 44 : Port(_name, _cache), cache(_cache), otherPort(NULL) 45{ 46 blocked = false; 47 waitingOnRetry = false; 48} 49 50 51BaseCache::BaseCache(const std::string &name, Params ¶ms) 52 : MemObject(name), 53 blocked(0), blockedSnoop(0), 54 blkSize(params.blkSize), 55 missCount(params.maxMisses), drainEvent(NULL) 56{ 57} 58 59 60 61void 62BaseCache::CachePort::recvStatusChange(Port::Status status) 63{ 64 if (status == Port::RangeChange) { 65 otherPort->sendStatusChange(Port::RangeChange); 66 } 67} 68 69void 70BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp, 71 AddrRangeList &snoop) 72{ 73 AddrRangeList dummy; 74 otherPort->getPeerAddressRanges(resp, dummy); 75} 76 77int 78BaseCache::CachePort::deviceBlockSize() 79{ 80 return cache->getBlockSize(); 81} 82 83bool 84BaseCache::CachePort::checkFunctional(PacketPtr pkt) 85{ 86 //Check storage here first 87 list<PacketPtr>::iterator i = drainList.begin(); 88 list<PacketPtr>::iterator iend = drainList.end(); 89 bool notDone = true; 90 while (i != iend && notDone) { 91 PacketPtr target = *i; 92 // If the target contains data, and it overlaps the 93 // probed request, need to update data 94 if (target->intersect(pkt)) { 95 DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n", 96 pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1)); 97 notDone = fixPacket(pkt, target); 98 } 99 i++; 100 } 101 //Also check the response not yet ready to be on the list 102 std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin(); 103 std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end(); 104 105 while (j != jend && notDone) { 106 PacketPtr target = j->second; 107 // If the target contains data, and it overlaps the 108 // probed 
request, need to update data 109 if (target->intersect(pkt)) { 110 DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n", 111 pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1)); 112 notDone = fixDelayedResponsePacket(pkt, target); 113 } 114 j++; 115 } 116 return notDone; 117} 118 119void 120BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt) 121{ 122 bool notDone = checkFunctional(pkt); 123 if (notDone) 124 sendFunctional(pkt); 125} 126 127 128void 129BaseCache::CachePort::respond(PacketPtr pkt, Tick time) 130{ 131 assert(time >= curTick); 132 if (pkt->needsResponse()) { 133 if (transmitList.empty()) { 134 assert(!responseEvent->scheduled()); 135 responseEvent->schedule(time); 136 transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt)); 137 return; 138 } 139 140 // something is on the list and this belongs at the end 141 if (time >= transmitList.back().first) { 142 transmitList.push_back(std::pair<Tick,PacketPtr>(time,pkt)); 143 return; 144 } 145 // Something is on the list and this belongs somewhere else 146 std::list<std::pair<Tick,PacketPtr> >::iterator i = 147 transmitList.begin(); 148 std::list<std::pair<Tick,PacketPtr> >::iterator end = 149 transmitList.end(); 150 bool done = false; 151 152 while (i != end && !done) { 153 if (time < i->first) { 154 if (i == transmitList.begin()) { 155 //Inserting at begining, reschedule 156 responseEvent->reschedule(time); 157 } 158 transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt)); 159 done = true; 160 } 161 i++; 162 } 163 } 164 else { 165 assert(0); 166 // this code was on the cpuSidePort only... do we still need it? 
167 if (pkt->cmd != MemCmd::UpgradeReq) 168 { 169 delete pkt->req; 170 delete pkt; 171 } 172 } 173} 174 175bool 176BaseCache::CachePort::drainResponse() 177{ 178 DPRINTF(CachePort, 179 "%s attempting to send a retry for response (%i waiting)\n", 180 name(), drainList.size()); 181 //We have some responses to drain first 182 PacketPtr pkt = drainList.front(); 183 if (sendTiming(pkt)) { 184 drainList.pop_front(); 185 DPRINTF(CachePort, "%s sucessful in sending a retry for" 186 "response (%i still waiting)\n", name(), drainList.size()); 187 if (!drainList.empty() || isBusRequested()) { 188 189 DPRINTF(CachePort, "%s has more responses/requests\n", name()); 190 return false; 191 } 192 } else { 193 waitingOnRetry = true; 194 DPRINTF(CachePort, "%s now waiting on a retry\n", name()); 195 } 196 return true; 197} 198 199 200bool 201BaseCache::CachePort::recvRetryCommon() 202{ 203 assert(waitingOnRetry); 204 waitingOnRetry = false; 205 if (!drainList.empty()) { 206 if (!drainResponse()) { 207 // more responses to drain... 
re-request bus 208 scheduleRequestEvent(curTick + 1); 209 } 210 // Check if we're done draining once this list is empty 211 if (drainList.empty()) { 212 cache->checkDrain(); 213 } 214 return true; 215 } 216 return false; 217} 218 219 220void 221BaseCache::CachePort::setBlocked() 222{ 223 assert(!blocked); 224 DPRINTF(Cache, "Cache Blocking\n"); 225 blocked = true; 226 //Clear the retry flag 227 mustSendRetry = false; 228} 229 230void 231BaseCache::CachePort::clearBlocked() 232{ 233 assert(blocked); 234 DPRINTF(Cache, "Cache Unblocking\n"); 235 blocked = false; 236 if (mustSendRetry) 237 { 238 DPRINTF(Cache, "Cache Sending Retry\n"); 239 mustSendRetry = false; 240 sendRetry(); 241 } 242} 243 244 245void 246BaseCache::init() 247{ 248 if (!cpuSidePort || !memSidePort) 249 panic("Cache not hooked up on both sides\n"); 250 cpuSidePort->sendStatusChange(Port::RangeChange); 251} 252 253void 254BaseCache::regStats() 255{ 256 using namespace Stats; 257 258 // Hit statistics 259 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 260 MemCmd cmd(access_idx); 261 const string &cstr = cmd.toString(); 262 263 hits[access_idx] 264 .init(maxThreadsPerCPU) 265 .name(name() + "." + cstr + "_hits") 266 .desc("number of " + cstr + " hits") 267 .flags(total | nozero | nonan) 268 ; 269 } 270 271 demandHits 272 .name(name() + ".demand_hits") 273 .desc("number of demand (read+write) hits") 274 .flags(total) 275 ; 276 demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq]; 277 278 overallHits 279 .name(name() + ".overall_hits") 280 .desc("number of overall hits") 281 .flags(total) 282 ; 283 overallHits = demandHits + hits[MemCmd::SoftPFReq] + hits[MemCmd::HardPFReq] 284 + hits[MemCmd::Writeback]; 285 286 // Miss statistics 287 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 288 MemCmd cmd(access_idx); 289 const string &cstr = cmd.toString(); 290 291 misses[access_idx] 292 .init(maxThreadsPerCPU) 293 .name(name() + "." 
+ cstr + "_misses") 294 .desc("number of " + cstr + " misses") 295 .flags(total | nozero | nonan) 296 ; 297 } 298 299 demandMisses 300 .name(name() + ".demand_misses") 301 .desc("number of demand (read+write) misses") 302 .flags(total) 303 ; 304 demandMisses = misses[MemCmd::ReadReq] + misses[MemCmd::WriteReq]; 305 306 overallMisses 307 .name(name() + ".overall_misses") 308 .desc("number of overall misses") 309 .flags(total) 310 ; 311 overallMisses = demandMisses + misses[MemCmd::SoftPFReq] + 312 misses[MemCmd::HardPFReq] + misses[MemCmd::Writeback]; 313 314 // Miss latency statistics 315 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 316 MemCmd cmd(access_idx); 317 const string &cstr = cmd.toString(); 318 319 missLatency[access_idx] 320 .init(maxThreadsPerCPU) 321 .name(name() + "." + cstr + "_miss_latency") 322 .desc("number of " + cstr + " miss cycles") 323 .flags(total | nozero | nonan) 324 ; 325 } 326 327 demandMissLatency 328 .name(name() + ".demand_miss_latency") 329 .desc("number of demand (read+write) miss cycles") 330 .flags(total) 331 ; 332 demandMissLatency = missLatency[MemCmd::ReadReq] + missLatency[MemCmd::WriteReq]; 333 334 overallMissLatency 335 .name(name() + ".overall_miss_latency") 336 .desc("number of overall miss cycles") 337 .flags(total) 338 ; 339 overallMissLatency = demandMissLatency + missLatency[MemCmd::SoftPFReq] + 340 missLatency[MemCmd::HardPFReq]; 341 342 // access formulas 343 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 344 MemCmd cmd(access_idx); 345 const string &cstr = cmd.toString(); 346 347 accesses[access_idx] 348 .name(name() + "." 
+ cstr + "_accesses") 349 .desc("number of " + cstr + " accesses(hits+misses)") 350 .flags(total | nozero | nonan) 351 ; 352 353 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 354 } 355 356 demandAccesses 357 .name(name() + ".demand_accesses") 358 .desc("number of demand (read+write) accesses") 359 .flags(total) 360 ; 361 demandAccesses = demandHits + demandMisses; 362 363 overallAccesses 364 .name(name() + ".overall_accesses") 365 .desc("number of overall (read+write) accesses") 366 .flags(total) 367 ; 368 overallAccesses = overallHits + overallMisses; 369 370 // miss rate formulas 371 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 372 MemCmd cmd(access_idx); 373 const string &cstr = cmd.toString(); 374 375 missRate[access_idx] 376 .name(name() + "." + cstr + "_miss_rate") 377 .desc("miss rate for " + cstr + " accesses") 378 .flags(total | nozero | nonan) 379 ; 380 381 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 382 } 383 384 demandMissRate 385 .name(name() + ".demand_miss_rate") 386 .desc("miss rate for demand accesses") 387 .flags(total) 388 ; 389 demandMissRate = demandMisses / demandAccesses; 390 391 overallMissRate 392 .name(name() + ".overall_miss_rate") 393 .desc("miss rate for overall accesses") 394 .flags(total) 395 ; 396 overallMissRate = overallMisses / overallAccesses; 397 398 // miss latency formulas 399 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 400 MemCmd cmd(access_idx); 401 const string &cstr = cmd.toString(); 402 403 avgMissLatency[access_idx] 404 .name(name() + "." 
+ cstr + "_avg_miss_latency") 405 .desc("average " + cstr + " miss latency") 406 .flags(total | nozero | nonan) 407 ; 408 409 avgMissLatency[access_idx] = 410 missLatency[access_idx] / misses[access_idx]; 411 } 412 413 demandAvgMissLatency 414 .name(name() + ".demand_avg_miss_latency") 415 .desc("average overall miss latency") 416 .flags(total) 417 ; 418 demandAvgMissLatency = demandMissLatency / demandMisses; 419 420 overallAvgMissLatency 421 .name(name() + ".overall_avg_miss_latency") 422 .desc("average overall miss latency") 423 .flags(total) 424 ; 425 overallAvgMissLatency = overallMissLatency / overallMisses; 426 427 blocked_cycles.init(NUM_BLOCKED_CAUSES); 428 blocked_cycles 429 .name(name() + ".blocked_cycles") 430 .desc("number of cycles access was blocked") 431 .subname(Blocked_NoMSHRs, "no_mshrs") 432 .subname(Blocked_NoTargets, "no_targets") 433 ; 434 435 436 blocked_causes.init(NUM_BLOCKED_CAUSES); 437 blocked_causes 438 .name(name() + ".blocked") 439 .desc("number of cycles access was blocked") 440 .subname(Blocked_NoMSHRs, "no_mshrs") 441 .subname(Blocked_NoTargets, "no_targets") 442 ; 443 444 avg_blocked 445 .name(name() + ".avg_blocked_cycles") 446 .desc("average number of cycles each access was blocked") 447 .subname(Blocked_NoMSHRs, "no_mshrs") 448 .subname(Blocked_NoTargets, "no_targets") 449 ; 450 451 avg_blocked = blocked_cycles / blocked_causes; 452 453 fastWrites 454 .name(name() + ".fast_writes") 455 .desc("number of fast writes performed") 456 ; 457 458 cacheCopies 459 .name(name() + ".cache_copies") 460 .desc("number of cache copies performed") 461 ; 462 463} 464 465unsigned int 466BaseCache::drain(Event *de) 467{ 468 // Set status 469 if (!canDrain()) { 470 drainEvent = de; 471 472 changeState(SimObject::Draining); 473 return 1; 474 } 475 476 changeState(SimObject::Drained); 477 return 0; 478} 479