dram_ctrl.cc revision 9669
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 */

#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMWR.hh"
#include "mem/simple_dram.hh"

using namespace std;

SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    bytesPerCacheLine(0),
    linesPerRowBuffer(p->lines_per_rowbuffer),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
}

void
SimpleDRAM::init()
{
    if (!port.isConnected()) {
        fatal("SimpleDRAM %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // get the burst size from the connected port as it is currently
    // assumed to be equal to the cache line size
    bytesPerCacheLine = port.peerBlockSize();

    // we could deal with plenty of options here, but for now do a quick
    // sanity check
    if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
        panic("Unexpected burst size %d", bytesPerCacheLine);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());
    rowsPerBank = capacity / (bytesPerCacheLine * linesPerRowBuffer *
                              banksPerRank * ranksPerChannel);

    if (range.interleaved()) {
        if (channels != range.stripes())
            panic("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RaBaChCo) {
            if (bytesPerCacheLine * linesPerRowBuffer !=
                range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
                      name());
            }
        } else if (addrMapping == Enums::RaBaCoCh) {
            if (bytesPerCacheLine != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
                      name());
            }
        } else if (addrMapping == Enums::CoRaBaCh) {
            if (bytesPerCacheLine != range.granularity())
                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
                      name());
        }
    }
}

void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}

Tick
SimpleDRAM::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
SimpleDRAM::readQueueFull() const
{
    DPRINTF(DRAM, "Read queue limit %d current size %d\n",
            readBufferSize, readQueue.size() + respQueue.size());

    return (readQueue.size() + respQueue.size()) == readBufferSize;
}

bool
SimpleDRAM::writeQueueFull() const
{
    DPRINTF(DRAM, "Write queue limit %d current size %d\n",
            writeBufferSize, writeQueue.size());
    return writeQueue.size() == writeBufferSize;
}

SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt)
{
    // decode the address based on the address mapping scheme, with
    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
    // respectively
    uint8_t rank;
    uint16_t bank;
    uint16_t row;

    Addr addr = pkt->getAddr();

    // truncate the address to the access granularity
    addr = addr / bytesPerCacheLine;

    // we have removed the lowest order address bits that denote the
    // position within the cache line
    if (addrMapping == Enums::RaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / linesPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / linesPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::CoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / linesPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            pkt->getAddr(), rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    return new DRAMPacket(pkt, rank, bank, row, pkt->getAddr(),
                          banks[rank][bank]);
}

void
SimpleDRAM::addToReadQueue(PacketPtr pkt)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    // First check write buffer to see if the data is already at
    // the controller
    list<DRAMPacket*>::const_iterator i;
    Addr addr = pkt->getAddr();

    // @todo: add size check
    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        if ((*i)->addr == addr) {
            servicedByWrQ++;
            DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
            bytesRead += bytesPerCacheLine;
            bytesConsumedRd += pkt->getSize();
            accessAndRespond(pkt);
            return;
        }
    }

    DRAMPacket* dram_pkt = decodeAddr(pkt);

    assert(readQueue.size() + respQueue.size() < readBufferSize);
    rdQLenPdf[readQueue.size() + respQueue.size()]++;

    DPRINTF(DRAM, "Adding to read queue\n");

    readQueue.push_back(dram_pkt);

    // Update stats
    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
    assert(bank_id < ranksPerChannel * banksPerRank);
    perBankRdReqs[bank_id]++;

    avgRdQLen = readQueue.size() + respQueue.size();

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
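    // drain at most writeThreshold + 1 entries from the write queue in
    // this pass; the loop below advances bank and bus timing state but
    // does not itself consume any simulated time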
    while (!writeQueue.empty()) {
        if (numWritesThisTime > writeThreshold) {
            DPRINTF(DRAMWR, "Hit write threshold %d\n", writeThreshold);
            break;
        }

        chooseNextWrite();
        DRAMPacket* dram_pkt = writeQueue.front();
        // What's the earliest the request can be put on the bus
        Tick schedTime = std::max(curTick(), busBusyUntil);

        DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
                schedTime + tBURST);

        pair<Tick, Tick> lat = estimateLatency(dram_pkt, schedTime + tBURST);
        Tick accessLat = lat.second;

        // look at the rowHitFlag set by estimateLatency
        if (rowHitFlag)
            writeRowHits++;

        Bank& bank = dram_pkt->bank_ref;

        if (pageMgmt == Enums::open) {
            bank.openRow = dram_pkt->row;
            bank.freeAt = schedTime + tBURST + std::max(accessLat, tCL);
            busBusyUntil = bank.freeAt - tCL;

            if (!rowHitFlag) {
                bank.tRASDoneAt = bank.freeAt + tRP;
                recordActivate(bank.freeAt - tCL - tRCD);
                busBusyUntil = bank.freeAt - tCL - tRCD;
            }
        } else if (pageMgmt == Enums::close) {
            bank.freeAt = schedTime + tBURST + accessLat + tRP + tRP;
            // Work backwards from bank.freeAt to determine activate time
            recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
            busBusyUntil = bank.freeAt - tRP - tRP - tCL - tRCD;
            DPRINTF(DRAMWR, "processWriteEvent::bank.freeAt for "
                    "banks_id %d is %lld\n",
                    dram_pkt->rank * banksPerRank + dram_pkt->bank,
                    bank.freeAt);
        } else
            panic("Unknown page management policy chosen\n");

        DPRINTF(DRAMWR, "Done writing to address %lld\n", dram_pkt->addr);

        DPRINTF(DRAMWR, "schedtime is %lld, tBURST is %lld, "
                "busbusyuntil is %lld\n",
                schedTime, tBURST, busBusyUntil);

        writeQueue.pop_front();
        delete dram_pkt;

        numWritesThisTime++;
    }

    DPRINTF(DRAMWR, "Completed %d writes, bus busy for %lld ticks, "
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required. The
    // retry above could already have caused it to be scheduled, so
    // first check
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, busBusyUntil);
}

void
SimpleDRAM::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);

    assert(writeStartTime >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, writeStartTime);
}

void
SimpleDRAM::addToWriteQueue(PacketPtr pkt)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    DRAMPacket* dram_pkt = decodeAddr(pkt);

    assert(writeQueue.size() < writeBufferSize);
    wrQLenPdf[writeQueue.size()]++;

    DPRINTF(DRAM, "Adding to write queue\n");

    writeQueue.push_back(dram_pkt);

    // Update stats
    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
    assert(bank_id < ranksPerChannel * banksPerRank);
    perBankWrReqs[bank_id]++;

    avgWrQLen = writeQueue.size();

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads

    bytesConsumedWr += pkt->getSize();
    bytesWritten += bytesPerCacheLine;
    accessAndRespond(pkt);

    // If your write buffer is starting to fill up, drain it!
    if (writeQueue.size() > writeThreshold && !stopReads) {
        triggerWrites();
    }
}

void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n" \
            "Bytes per cacheline %d\n" \
            "Lines per row buffer %d\n" \
            "Rows per bank %d\n" \
            "Banks per rank %d\n" \
            "Ranks per channel %d\n" \
            "Total mem capacity %u\n",
            name(), bytesPerCacheLine, linesPerRowBuffer, rowsPerBank,
            banksPerRank, ranksPerChannel, bytesPerCacheLine *
            linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n" \
            "Read buffer size %d\n" \
            "Write buffer size %d\n" \
            "Write buffer thresh %d\n" \
            "Scheduler %s\n" \
            "Address mapping %s\n" \
            "Page policy %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD %d ticks\n" \
            "tCL %d ticks\n" \
            "tRP %d ticks\n" \
            "tBURST %d ticks\n" \
            "tRFC %d ticks\n" \
            "tREFI %d ticks\n" \
            "tWTR %d ticks\n" \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}

void
SimpleDRAM::printQs() const
{
    list<DRAMPacket*>::const_iterator i;

    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    if (pkt->getSize() == bytesPerCacheLine)
        cpuReqs++;

    // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    unsigned size = pkt->getSize();
    if (size > bytesPerCacheLine)
        panic("Request size %d is greater than burst size %d",
              size, bytesPerCacheLine);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull()) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            readPktSize[ceilLog2(size)]++;
            addToReadQueue(pkt);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull()) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            writePktSize[ceilLog2(size)]++;
            addToWriteQueue(pkt);
            writeReqs++;
            numReqs++;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt);
    }

    retryRdReq = false;
    retryWrReq = false;
    return true;
}

void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    PacketPtr pkt = respQueue.front()->pkt;

    // Actually responds to the requestor
    bytesConsumedRd += pkt->getSize();
    bytesRead += bytesPerCacheLine;
    accessAndRespond(pkt);

    delete respQueue.front();
    respQueue.pop_front();

    // Update stats
    avgRdQLen = readQueue.size() + respQueue.size();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
SimpleDRAM::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAMWR, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already head
    } else if (memSchedPolicy == Enums::frfcfs) {
        list<DRAMPacket*>::iterator i = writeQueue.begin();
        bool foundRowHit = false;
        while (!foundRowHit && i != writeQueue.end()) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            if (bank.openRow == dram_pkt->row) { // FR part
                DPRINTF(DRAMWR, "Write row buffer hit\n");
                writeQueue.erase(i);
                writeQueue.push_front(dram_pkt);
                foundRowHit = true;
                // the iterator is invalidated by the erase, so stop here
                break;
            } else { // FCFS part
                ;
            }
            ++i;
        }
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAMWR, "Selected next write request\n");
}

bool
SimpleDRAM::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        for (list<DRAMPacket*>::iterator i = readQueue.begin();
             i != readQueue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            // Check if it is a row hit
            if (bank.openRow == dram_pkt->row) { // FR part
                DPRINTF(DRAM, "Row buffer hit\n");
                readQueue.erase(i);
                readQueue.push_front(dram_pkt);
                break;
            } else { // FCFS part
                ;
            }
        }
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}

void
SimpleDRAM::accessAndRespond(PacketPtr pkt)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out the
        // next tick
        port.schedTimingResp(pkt, curTick() + 1);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;

    const Bank& bank = dram_pkt->bank_ref;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about the bank being free for access
            rowHitFlag = true;

            if (bank.freeAt < inTime) {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            } else {
                accLat += 0;
                bankLat += 0;
            }

        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}

void
SimpleDRAM::recordActivate(Tick act_tick)
{
    assert(actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // sanity check
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        panic("Got %d activates in window %d (%d - %d) which is smaller "
              "than %d\n", activationLimit, act_tick - actTicks.back(),
              act_tick, actTicks.back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks.pop_back();

    // record a new activation (in the future)
    actTicks.push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks.back() + tXAW);
        for (int i = 0; i < ranksPerChannel; i++)
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
                                              actTicks.back() + tXAW);
    }
}

void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bank_ref;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        // If you activated a new row due to this access, the next access
        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP.
        // Also need to account for tXAW
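        // recordActivate() below is given the implied activate time,
        // obtained by working backwards from freeAt (freeAt - tCL - tRCD),
        // so that the tXAW activation window can be enforced across banks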
        if (!rowHitFlag) {
            bank.tRASDoneAt = bank.freeAt + tRP;
            recordActivate(bank.freeAt - tCL - tRCD); // since this is open
                                                      // page, no tRP by default
        }
    } else if (pageMgmt == Enums::close) { // accounting for tRAS also
        // assuming that tRAS ~= 3 * tRP, and tRC ~= 4 * tRP, as is common
        // (refer Jacob/Ng/Wang and Micron datasheets)
        bank.freeAt = curTick() + addDelay + accessLat + tRP + tRP;
        recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD); // essentially
                                                              // freeAt - tRC
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
            "readytime is %lld busbusyuntil is %lld. " \
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    if (rowHitFlag)
        readRowHits++;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    // time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to meet tRAS, then precharge,
    // open a new row, and access, it is ~4*tRCD.

    Tick newTime = (busBusyUntil > 4 * tRCD) ?
        std::max(busBusyUntil - 4 * tRCD, curTick()) :
        curTick();

    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }
}

void
SimpleDRAM::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requestor at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        list<DRAMPacket*>::iterator i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
SimpleDRAM::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, see if we
        // are asked to drain, and if so trigger writes, this also
        // ensures that if we hit the write limit we will do this
        // multiple times until we are completely drained
        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}

Tick
SimpleDRAM::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}

void
SimpleDRAM::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banks[i][j].freeAt = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}

void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Total number of read requests seen");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Total number of write requests seen");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of read reqs serviced by write Q");

    cpuReqs
        .name(name() + ".cpureqs")
        .desc("Reqs generated by CPU via cache - shady");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWrite")
        .desc("Reqs where no action is needed");

    perBankRdReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdReqs")
        .desc("Track reads on a per bank basis");

    perBankWrReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrReqs")
        .desc("Track writes on a per bank basis");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length over time")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length over time")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total cycles spent in queuing delays");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total cycles spent in bank access");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total cycles spent in databus access");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Sum of mem lat for all requests");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per request")
        .precision(2);

    avgQLat = totQLat / (readReqs - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per request")
        .precision(2);

    avgBankLat = totBankLat / (readReqs - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per request")
        .precision(2);

    avgBusLat = totBusLat / (readReqs - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readReqs - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times rd buffer was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times wr buffer was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readReqs - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / writeReqs) * 100;

    readPktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".readPktSize")
        .desc("Categorize read packet sizes");

    writePktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".writePktSize")
        .desc("Categorize write packet sizes");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesRead
        .name(name() + ".bytesRead")
        .desc("Total number of bytes read from memory");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to memory");

    bytesConsumedRd
        .name(name() + ".bytesConsumedRd")
        .desc("bytesRead derated as per pkt->getSize()");

    bytesConsumedWr
        .name(name() + ".bytesConsumedWr")
        .desc("bytesWritten derated as per pkt->getSize()");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average achieved read bandwidth in MB/s")
        .precision(2);

    avgRdBW = (bytesRead / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MB/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgConsumedRdBW
        .name(name() + ".avgConsumedRdBW")
        .desc("Average consumed read bandwidth in MB/s")
        .precision(2);

    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;

    avgConsumedWrBW
        .name(name() + ".avgConsumedWrBW")
        .desc("Average consumed write bandwidth in MB/s")
        .precision(2);

    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MB/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * bytesPerCacheLine / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
}

void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}

SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
SimpleDRAM::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

SimpleDRAM*
SimpleDRAMParams::create()
{
    return new SimpleDRAM(this);
}