base.cc revision 4626

/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
34 */ 35 36#include "cpu/base.hh" 37#include "cpu/smt.hh" 38#include "mem/cache/base_cache.hh" 39#include "mem/cache/miss/mshr.hh" 40 41using namespace std; 42 43BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache) 44 : SimpleTimingPort(_name, _cache), cache(_cache), otherPort(NULL), 45 blocked(false), waitingOnRetry(false), mustSendRetry(false), 46 requestCauses(0) 47{ 48} 49 50 51BaseCache::BaseCache(const std::string &name, Params ¶ms) 52 : MemObject(name), 53 mshrQueue(params.numMSHRs, 4), 54 writeBuffer(params.numWriteBuffers, params.numMSHRs+1000), 55 blkSize(params.blkSize), 56 numTarget(params.numTargets), 57 blocked(0), 58 noTargetMSHR(NULL), 59 missCount(params.maxMisses), 60 drainEvent(NULL) 61{ 62} 63 64 65void 66BaseCache::CachePort::recvStatusChange(Port::Status status) 67{ 68 if (status == Port::RangeChange) { 69 otherPort->sendStatusChange(Port::RangeChange); 70 } 71} 72 73int 74BaseCache::CachePort::deviceBlockSize() 75{ 76 return cache->getBlockSize(); 77} 78 79 80void 81BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt) 82{ 83 checkFunctional(pkt); 84 if (pkt->result != Packet::Success) 85 sendFunctional(pkt); 86} 87 88 89bool 90BaseCache::CachePort::recvRetryCommon() 91{ 92 assert(waitingOnRetry); 93 waitingOnRetry = false; 94 return false; 95} 96 97 98void 99BaseCache::CachePort::setBlocked() 100{ 101 assert(!blocked); 102 DPRINTF(Cache, "Cache Blocking\n"); 103 blocked = true; 104 //Clear the retry flag 105 mustSendRetry = false; 106} 107 108void 109BaseCache::CachePort::clearBlocked() 110{ 111 assert(blocked); 112 DPRINTF(Cache, "Cache Unblocking\n"); 113 blocked = false; 114 if (mustSendRetry) 115 { 116 DPRINTF(Cache, "Cache Sending Retry\n"); 117 mustSendRetry = false; 118 sendRetry(); 119 } 120} 121 122 123void 124BaseCache::init() 125{ 126 if (!cpuSidePort || !memSidePort) 127 panic("Cache not hooked up on both sides\n"); 128 cpuSidePort->sendStatusChange(Port::RangeChange); 129} 130 131void 
132BaseCache::regStats() 133{ 134 using namespace Stats; 135 136 // Hit statistics 137 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 138 MemCmd cmd(access_idx); 139 const string &cstr = cmd.toString(); 140 141 hits[access_idx] 142 .init(maxThreadsPerCPU) 143 .name(name() + "." + cstr + "_hits") 144 .desc("number of " + cstr + " hits") 145 .flags(total | nozero | nonan) 146 ; 147 } 148 149 demandHits 150 .name(name() + ".demand_hits") 151 .desc("number of demand (read+write) hits") 152 .flags(total) 153 ; 154 demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq]; 155 156 overallHits 157 .name(name() + ".overall_hits") 158 .desc("number of overall hits") 159 .flags(total) 160 ; 161 overallHits = demandHits + hits[MemCmd::SoftPFReq] + hits[MemCmd::HardPFReq] 162 + hits[MemCmd::Writeback]; 163 164 // Miss statistics 165 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 166 MemCmd cmd(access_idx); 167 const string &cstr = cmd.toString(); 168 169 misses[access_idx] 170 .init(maxThreadsPerCPU) 171 .name(name() + "." + cstr + "_misses") 172 .desc("number of " + cstr + " misses") 173 .flags(total | nozero | nonan) 174 ; 175 } 176 177 demandMisses 178 .name(name() + ".demand_misses") 179 .desc("number of demand (read+write) misses") 180 .flags(total) 181 ; 182 demandMisses = misses[MemCmd::ReadReq] + misses[MemCmd::WriteReq]; 183 184 overallMisses 185 .name(name() + ".overall_misses") 186 .desc("number of overall misses") 187 .flags(total) 188 ; 189 overallMisses = demandMisses + misses[MemCmd::SoftPFReq] + 190 misses[MemCmd::HardPFReq] + misses[MemCmd::Writeback]; 191 192 // Miss latency statistics 193 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 194 MemCmd cmd(access_idx); 195 const string &cstr = cmd.toString(); 196 197 missLatency[access_idx] 198 .init(maxThreadsPerCPU) 199 .name(name() + "." 
+ cstr + "_miss_latency") 200 .desc("number of " + cstr + " miss cycles") 201 .flags(total | nozero | nonan) 202 ; 203 } 204 205 demandMissLatency 206 .name(name() + ".demand_miss_latency") 207 .desc("number of demand (read+write) miss cycles") 208 .flags(total) 209 ; 210 demandMissLatency = missLatency[MemCmd::ReadReq] + missLatency[MemCmd::WriteReq]; 211 212 overallMissLatency 213 .name(name() + ".overall_miss_latency") 214 .desc("number of overall miss cycles") 215 .flags(total) 216 ; 217 overallMissLatency = demandMissLatency + missLatency[MemCmd::SoftPFReq] + 218 missLatency[MemCmd::HardPFReq]; 219 220 // access formulas 221 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 222 MemCmd cmd(access_idx); 223 const string &cstr = cmd.toString(); 224 225 accesses[access_idx] 226 .name(name() + "." + cstr + "_accesses") 227 .desc("number of " + cstr + " accesses(hits+misses)") 228 .flags(total | nozero | nonan) 229 ; 230 231 accesses[access_idx] = hits[access_idx] + misses[access_idx]; 232 } 233 234 demandAccesses 235 .name(name() + ".demand_accesses") 236 .desc("number of demand (read+write) accesses") 237 .flags(total) 238 ; 239 demandAccesses = demandHits + demandMisses; 240 241 overallAccesses 242 .name(name() + ".overall_accesses") 243 .desc("number of overall (read+write) accesses") 244 .flags(total) 245 ; 246 overallAccesses = overallHits + overallMisses; 247 248 // miss rate formulas 249 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 250 MemCmd cmd(access_idx); 251 const string &cstr = cmd.toString(); 252 253 missRate[access_idx] 254 .name(name() + "." 
+ cstr + "_miss_rate") 255 .desc("miss rate for " + cstr + " accesses") 256 .flags(total | nozero | nonan) 257 ; 258 259 missRate[access_idx] = misses[access_idx] / accesses[access_idx]; 260 } 261 262 demandMissRate 263 .name(name() + ".demand_miss_rate") 264 .desc("miss rate for demand accesses") 265 .flags(total) 266 ; 267 demandMissRate = demandMisses / demandAccesses; 268 269 overallMissRate 270 .name(name() + ".overall_miss_rate") 271 .desc("miss rate for overall accesses") 272 .flags(total) 273 ; 274 overallMissRate = overallMisses / overallAccesses; 275 276 // miss latency formulas 277 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 278 MemCmd cmd(access_idx); 279 const string &cstr = cmd.toString(); 280 281 avgMissLatency[access_idx] 282 .name(name() + "." + cstr + "_avg_miss_latency") 283 .desc("average " + cstr + " miss latency") 284 .flags(total | nozero | nonan) 285 ; 286 287 avgMissLatency[access_idx] = 288 missLatency[access_idx] / misses[access_idx]; 289 } 290 291 demandAvgMissLatency 292 .name(name() + ".demand_avg_miss_latency") 293 .desc("average overall miss latency") 294 .flags(total) 295 ; 296 demandAvgMissLatency = demandMissLatency / demandMisses; 297 298 overallAvgMissLatency 299 .name(name() + ".overall_avg_miss_latency") 300 .desc("average overall miss latency") 301 .flags(total) 302 ; 303 overallAvgMissLatency = overallMissLatency / overallMisses; 304 305 blocked_cycles.init(NUM_BLOCKED_CAUSES); 306 blocked_cycles 307 .name(name() + ".blocked_cycles") 308 .desc("number of cycles access was blocked") 309 .subname(Blocked_NoMSHRs, "no_mshrs") 310 .subname(Blocked_NoTargets, "no_targets") 311 ; 312 313 314 blocked_causes.init(NUM_BLOCKED_CAUSES); 315 blocked_causes 316 .name(name() + ".blocked") 317 .desc("number of cycles access was blocked") 318 .subname(Blocked_NoMSHRs, "no_mshrs") 319 .subname(Blocked_NoTargets, "no_targets") 320 ; 321 322 avg_blocked 323 .name(name() + ".avg_blocked_cycles") 324 
.desc("average number of cycles each access was blocked") 325 .subname(Blocked_NoMSHRs, "no_mshrs") 326 .subname(Blocked_NoTargets, "no_targets") 327 ; 328 329 avg_blocked = blocked_cycles / blocked_causes; 330 331 fastWrites 332 .name(name() + ".fast_writes") 333 .desc("number of fast writes performed") 334 ; 335 336 cacheCopies 337 .name(name() + ".cache_copies") 338 .desc("number of cache copies performed") 339 ; 340 341 writebacks 342 .init(maxThreadsPerCPU) 343 .name(name() + ".writebacks") 344 .desc("number of writebacks") 345 .flags(total) 346 ; 347 348 // MSHR statistics 349 // MSHR hit statistics 350 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 351 MemCmd cmd(access_idx); 352 const string &cstr = cmd.toString(); 353 354 mshr_hits[access_idx] 355 .init(maxThreadsPerCPU) 356 .name(name() + "." + cstr + "_mshr_hits") 357 .desc("number of " + cstr + " MSHR hits") 358 .flags(total | nozero | nonan) 359 ; 360 } 361 362 demandMshrHits 363 .name(name() + ".demand_mshr_hits") 364 .desc("number of demand (read+write) MSHR hits") 365 .flags(total) 366 ; 367 demandMshrHits = mshr_hits[MemCmd::ReadReq] + mshr_hits[MemCmd::WriteReq]; 368 369 overallMshrHits 370 .name(name() + ".overall_mshr_hits") 371 .desc("number of overall MSHR hits") 372 .flags(total) 373 ; 374 overallMshrHits = demandMshrHits + mshr_hits[MemCmd::SoftPFReq] + 375 mshr_hits[MemCmd::HardPFReq]; 376 377 // MSHR miss statistics 378 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 379 MemCmd cmd(access_idx); 380 const string &cstr = cmd.toString(); 381 382 mshr_misses[access_idx] 383 .init(maxThreadsPerCPU) 384 .name(name() + "." 
+ cstr + "_mshr_misses") 385 .desc("number of " + cstr + " MSHR misses") 386 .flags(total | nozero | nonan) 387 ; 388 } 389 390 demandMshrMisses 391 .name(name() + ".demand_mshr_misses") 392 .desc("number of demand (read+write) MSHR misses") 393 .flags(total) 394 ; 395 demandMshrMisses = mshr_misses[MemCmd::ReadReq] + mshr_misses[MemCmd::WriteReq]; 396 397 overallMshrMisses 398 .name(name() + ".overall_mshr_misses") 399 .desc("number of overall MSHR misses") 400 .flags(total) 401 ; 402 overallMshrMisses = demandMshrMisses + mshr_misses[MemCmd::SoftPFReq] + 403 mshr_misses[MemCmd::HardPFReq]; 404 405 // MSHR miss latency statistics 406 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 407 MemCmd cmd(access_idx); 408 const string &cstr = cmd.toString(); 409 410 mshr_miss_latency[access_idx] 411 .init(maxThreadsPerCPU) 412 .name(name() + "." + cstr + "_mshr_miss_latency") 413 .desc("number of " + cstr + " MSHR miss cycles") 414 .flags(total | nozero | nonan) 415 ; 416 } 417 418 demandMshrMissLatency 419 .name(name() + ".demand_mshr_miss_latency") 420 .desc("number of demand (read+write) MSHR miss cycles") 421 .flags(total) 422 ; 423 demandMshrMissLatency = mshr_miss_latency[MemCmd::ReadReq] 424 + mshr_miss_latency[MemCmd::WriteReq]; 425 426 overallMshrMissLatency 427 .name(name() + ".overall_mshr_miss_latency") 428 .desc("number of overall MSHR miss cycles") 429 .flags(total) 430 ; 431 overallMshrMissLatency = demandMshrMissLatency + 432 mshr_miss_latency[MemCmd::SoftPFReq] + mshr_miss_latency[MemCmd::HardPFReq]; 433 434 // MSHR uncacheable statistics 435 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 436 MemCmd cmd(access_idx); 437 const string &cstr = cmd.toString(); 438 439 mshr_uncacheable[access_idx] 440 .init(maxThreadsPerCPU) 441 .name(name() + "." 
+ cstr + "_mshr_uncacheable") 442 .desc("number of " + cstr + " MSHR uncacheable") 443 .flags(total | nozero | nonan) 444 ; 445 } 446 447 overallMshrUncacheable 448 .name(name() + ".overall_mshr_uncacheable_misses") 449 .desc("number of overall MSHR uncacheable misses") 450 .flags(total) 451 ; 452 overallMshrUncacheable = mshr_uncacheable[MemCmd::ReadReq] 453 + mshr_uncacheable[MemCmd::WriteReq] + mshr_uncacheable[MemCmd::SoftPFReq] 454 + mshr_uncacheable[MemCmd::HardPFReq]; 455 456 // MSHR miss latency statistics 457 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 458 MemCmd cmd(access_idx); 459 const string &cstr = cmd.toString(); 460 461 mshr_uncacheable_lat[access_idx] 462 .init(maxThreadsPerCPU) 463 .name(name() + "." + cstr + "_mshr_uncacheable_latency") 464 .desc("number of " + cstr + " MSHR uncacheable cycles") 465 .flags(total | nozero | nonan) 466 ; 467 } 468 469 overallMshrUncacheableLatency 470 .name(name() + ".overall_mshr_uncacheable_latency") 471 .desc("number of overall MSHR uncacheable cycles") 472 .flags(total) 473 ; 474 overallMshrUncacheableLatency = mshr_uncacheable_lat[MemCmd::ReadReq] 475 + mshr_uncacheable_lat[MemCmd::WriteReq] 476 + mshr_uncacheable_lat[MemCmd::SoftPFReq] 477 + mshr_uncacheable_lat[MemCmd::HardPFReq]; 478 479#if 0 480 // MSHR access formulas 481 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 482 MemCmd cmd(access_idx); 483 const string &cstr = cmd.toString(); 484 485 mshrAccesses[access_idx] 486 .name(name() + "." 
+ cstr + "_mshr_accesses") 487 .desc("number of " + cstr + " mshr accesses(hits+misses)") 488 .flags(total | nozero | nonan) 489 ; 490 mshrAccesses[access_idx] = 491 mshr_hits[access_idx] + mshr_misses[access_idx] 492 + mshr_uncacheable[access_idx]; 493 } 494 495 demandMshrAccesses 496 .name(name() + ".demand_mshr_accesses") 497 .desc("number of demand (read+write) mshr accesses") 498 .flags(total | nozero | nonan) 499 ; 500 demandMshrAccesses = demandMshrHits + demandMshrMisses; 501 502 overallMshrAccesses 503 .name(name() + ".overall_mshr_accesses") 504 .desc("number of overall (read+write) mshr accesses") 505 .flags(total | nozero | nonan) 506 ; 507 overallMshrAccesses = overallMshrHits + overallMshrMisses 508 + overallMshrUncacheable; 509#endif 510 511 // MSHR miss rate formulas 512 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 513 MemCmd cmd(access_idx); 514 const string &cstr = cmd.toString(); 515 516 mshrMissRate[access_idx] 517 .name(name() + "." + cstr + "_mshr_miss_rate") 518 .desc("mshr miss rate for " + cstr + " accesses") 519 .flags(total | nozero | nonan) 520 ; 521 522 mshrMissRate[access_idx] = 523 mshr_misses[access_idx] / accesses[access_idx]; 524 } 525 526 demandMshrMissRate 527 .name(name() + ".demand_mshr_miss_rate") 528 .desc("mshr miss rate for demand accesses") 529 .flags(total) 530 ; 531 demandMshrMissRate = demandMshrMisses / demandAccesses; 532 533 overallMshrMissRate 534 .name(name() + ".overall_mshr_miss_rate") 535 .desc("mshr miss rate for overall accesses") 536 .flags(total) 537 ; 538 overallMshrMissRate = overallMshrMisses / overallAccesses; 539 540 // mshrMiss latency formulas 541 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 542 MemCmd cmd(access_idx); 543 const string &cstr = cmd.toString(); 544 545 avgMshrMissLatency[access_idx] 546 .name(name() + "." 
+ cstr + "_avg_mshr_miss_latency") 547 .desc("average " + cstr + " mshr miss latency") 548 .flags(total | nozero | nonan) 549 ; 550 551 avgMshrMissLatency[access_idx] = 552 mshr_miss_latency[access_idx] / mshr_misses[access_idx]; 553 } 554 555 demandAvgMshrMissLatency 556 .name(name() + ".demand_avg_mshr_miss_latency") 557 .desc("average overall mshr miss latency") 558 .flags(total) 559 ; 560 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; 561 562 overallAvgMshrMissLatency 563 .name(name() + ".overall_avg_mshr_miss_latency") 564 .desc("average overall mshr miss latency") 565 .flags(total) 566 ; 567 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; 568 569 // mshrUncacheable latency formulas 570 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) { 571 MemCmd cmd(access_idx); 572 const string &cstr = cmd.toString(); 573 574 avgMshrUncacheableLatency[access_idx] 575 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency") 576 .desc("average " + cstr + " mshr uncacheable latency") 577 .flags(total | nozero | nonan) 578 ; 579 580 avgMshrUncacheableLatency[access_idx] = 581 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx]; 582 } 583 584 overallAvgMshrUncacheableLatency 585 .name(name() + ".overall_avg_mshr_uncacheable_latency") 586 .desc("average overall mshr uncacheable latency") 587 .flags(total) 588 ; 589 overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable; 590 591 mshr_cap_events 592 .init(maxThreadsPerCPU) 593 .name(name() + ".mshr_cap_events") 594 .desc("number of times MSHR cap was activated") 595 .flags(total) 596 ; 597 598 //software prefetching stats 599 soft_prefetch_mshr_full 600 .init(maxThreadsPerCPU) 601 .name(name() + ".soft_prefetch_mshr_full") 602 .desc("number of mshr full events for SW prefetching instrutions") 603 .flags(total) 604 ; 605 606 mshr_no_allocate_misses 607 .name(name() +".no_allocate_misses") 608 .desc("Number of 
misses that were no-allocate") 609 ; 610 611} 612 613unsigned int 614BaseCache::drain(Event *de) 615{ 616 int count = memSidePort->drain(de) + cpuSidePort->drain(de); 617 618 // Set status 619 if (count != 0) { 620 drainEvent = de; 621 622 changeState(SimObject::Draining); 623 return count; 624 } 625 626 changeState(SimObject::Drained); 627 return 0; 628} 629