/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Jason Lowe-Power
 */

#include "learning_gem5/part2/simple_cache.hh"

#include "base/random.hh"
#include "debug/SimpleCache.hh"
#include "sim/system.hh"

SimpleCache::SimpleCache(SimpleCacheParams *params) :
    MemObject(params),
    latency(params->latency),
    blockSize(params->system->cacheLineSize()),
    capacity(params->size / blockSize),
    memPort(params->name + ".mem_side", this),
    blocked(false), originalPacket(nullptr), waitingPortId(-1)
{
    // Since the CPU side ports are a vector of ports, create an instance of
    // the CPUSidePort for each connection. This member of params is
    // automatically created depending on the name of the vector port and
    // holds the number of connections to this port name.
    for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
    }
}

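// Called during port binding: returns the C++ port object that corresponds
// to the port name declared in the Python SimObject (SimpleCache.py). The
// cpu_side port is a vector port, so the index selects which CPUSidePort to
// return.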
Port &
SimpleCache::getPort(const std::string &if_name, PortID idx)
{
    // This is the name from the Python SimObject declaration in SimpleCache.py
    if (if_name == "mem_side") {
        // Only the CPU side is a vector port; an index here is an error.
        panic_if(idx != InvalidPortID,
                 "Mem side of simple cache is not a vector port");
        return memPort;
    } else if (if_name == "cpu_side" && idx < cpuPorts.size()) {
        // We should have already created all of the ports in the constructor
        return cpuPorts[idx];
    } else {
        // pass it along to our super class
        return MemObject::getPort(if_name, idx);
    }
}

void
SimpleCache::CPUSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    DPRINTF(SimpleCache, "Sending %s to CPU\n", pkt->print());
    if (!sendTimingResp(pkt)) {
        DPRINTF(SimpleCache, "failed!\n");
        blockedPacket = pkt;
    }
}

AddrRangeList
SimpleCache::CPUSidePort::getAddrRanges() const
{
    return owner->getAddrRanges();
}

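// Called by the cache (via sendResponse) once it unblocks. If a request was
// previously rejected, ask the CPU to retry it now that the port is free.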
void
SimpleCache::CPUSidePort::trySendRetry()
{
    if (needRetry && blockedPacket == nullptr) {
        // Only send a retry if the port is now completely free
        needRetry = false;
        DPRINTF(SimpleCache, "Sending retry req.\n");
        sendRetryReq();
    }
}

void
SimpleCache::CPUSidePort::recvFunctional(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleFunctional(pkt);
}

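// Called when the CPU sends a timing-mode request. Returning false tells the
// CPU to hold the packet until we send a retry (see trySendRetry above).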
bool
SimpleCache::CPUSidePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(SimpleCache, "Got request %s\n", pkt->print());

    if (blockedPacket || needRetry) {
        // The cache may not be able to send a reply if this is blocked
        DPRINTF(SimpleCache, "Request blocked\n");
        needRetry = true;
        return false;
    }
    // Just forward to the cache.
    if (!owner->handleRequest(pkt, id)) {
        DPRINTF(SimpleCache, "Request failed\n");
        // stalling
        needRetry = true;
        return false;
    } else {
        DPRINTF(SimpleCache, "Request succeeded\n");
        return true;
    }
}

void
SimpleCache::CPUSidePort::recvRespRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    DPRINTF(SimpleCache, "Retrying response pkt %s\n", pkt->print());
    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);

    // We may now be able to accept new packets
    trySendRetry();
}

void
SimpleCache::MemSidePort::sendPacket(PacketPtr pkt)
{
    // Note: This flow control is very simple since the cache is blocking.

    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");

    // If we can't send the packet across the port, store it for later.
    if (!sendTimingReq(pkt)) {
        blockedPacket = pkt;
    }
}

bool
SimpleCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    // Just forward to the cache.
    return owner->handleResponse(pkt);
}

void
SimpleCache::MemSidePort::recvReqRetry()
{
    // We should have a blocked packet if this function is called.
    assert(blockedPacket != nullptr);

    // Grab the blocked packet.
    PacketPtr pkt = blockedPacket;
    blockedPacket = nullptr;

    // Try to resend it. It's possible that it fails again.
    sendPacket(pkt);
}

void
SimpleCache::MemSidePort::recvRangeChange()
{
    owner->sendRangeChange();
}

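// Handle a request from one of the CPU-side ports. Since this is a blocking
// cache, only one request can be outstanding: remember which port it came
// from and schedule the actual cache access `latency` cycles in the future.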
bool
SimpleCache::handleRequest(PacketPtr pkt, int port_id)
{
    if (blocked) {
        // There is currently an outstanding request so we can't respond. Stall
        return false;
    }

    DPRINTF(SimpleCache, "Got request for addr %#x\n", pkt->getAddr());

    // This cache is now blocked waiting for the response to this packet.
    blocked = true;

    // Store the port for when we get the response
    assert(waitingPortId == -1);
    waitingPortId = port_id;

    // Schedule an event after cache access latency to actually access
    schedule(new EventFunctionWrapper([this, pkt]{ accessTiming(pkt); },
                                      name() + ".accessEvent", true),
             clockEdge(latency));

    return true;
}

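// Called when memory responds to a miss. Insert the returned block, record
// the miss latency, and, if the original request had to be upgraded to a
// full cache line, satisfy it from the newly inserted block before replying.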
bool
SimpleCache::handleResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Got response for addr %#x\n", pkt->getAddr());

    // For now assume that inserts are off of the critical path and don't count
    // for any added latency.
    insert(pkt);

    missLatency.sample(curTick() - missTime);

    // If we had to upgrade the request packet to a full cache line, now we
    // can use that packet to construct the response.
    if (originalPacket != nullptr) {
        DPRINTF(SimpleCache, "Copying data from new packet to old\n");
        // We had to upgrade a previous packet. We can functionally deal with
        // the cache access now. It better be a hit.
        bool hit M5_VAR_USED = accessFunctional(originalPacket);
        panic_if(!hit, "Should always hit after inserting");
        originalPacket->makeResponse();
        delete pkt; // We may need to delay this, I'm not sure.
        pkt = originalPacket;
        originalPacket = nullptr;
    } // else, pkt contains the data it needs

    sendResponse(pkt);

    return true;
}

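// Unblock the cache and send the response back to the CPU-side port the
// original request arrived on. The cache is unblocked before sending so that
// a CPU retrying in the same call chain is not rejected.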
void
SimpleCache::sendResponse(PacketPtr pkt)
{
    assert(blocked);
    DPRINTF(SimpleCache, "Sending resp for addr %#x\n", pkt->getAddr());

    int port = waitingPortId;

    // The packet is now done. We're about to put it in the port, no need for
    // this object to continue to stall.
    // We need to free the resource before sending the packet in case the CPU
    // tries to send another request immediately (e.g., in the same callchain).
    blocked = false;
    waitingPortId = -1;

    // Simply forward the response to the CPU-side port it arrived on
    cpuPorts[port].sendPacket(pkt);

    // For each of the cpu ports, if it needs to send a retry, it should do it
    // now since this memory object may be unblocked now.
    for (auto& port : cpuPorts) {
        port.trySendRetry();
    }
}

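// Functional (debugging) accesses bypass timing: satisfy the packet from the
// cache if the block is present, otherwise forward it to memory.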
void
SimpleCache::handleFunctional(PacketPtr pkt)
{
    if (accessFunctional(pkt)) {
        pkt->makeResponse();
    } else {
        memPort.sendFunctional(pkt);
    }
}

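// Perform the timing-mode cache access. On a hit we respond immediately; on
// a miss we forward to memory, upgrading the packet to a full, aligned cache
// line first if necessary.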
void
SimpleCache::accessTiming(PacketPtr pkt)
{
    bool hit = accessFunctional(pkt);

    DPRINTF(SimpleCache, "%s for packet: %s\n", hit ? "Hit" : "Miss",
            pkt->print());

    if (hit) {
        // Respond to the CPU side
        hits++; // update stats
        DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), pkt->getSize());
        pkt->makeResponse();
        sendResponse(pkt);
    } else {
        misses++; // update stats
        missTime = curTick();
        // Forward to the memory side.
        // We can't directly forward the packet unless it is exactly the size
        // of the cache line, and aligned. Check for that here.
        Addr addr = pkt->getAddr();
        Addr block_addr = pkt->getBlockAddr(blockSize);
        unsigned size = pkt->getSize();
        if (addr == block_addr && size == blockSize) {
            // Aligned and block size. We can just forward.
            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(pkt);
        } else {
            DPRINTF(SimpleCache, "Upgrading packet to block size\n");
            panic_if(addr - block_addr + size > blockSize,
                     "Cannot handle accesses that span multiple cache lines");
            // Unaligned access to one cache block
            assert(pkt->needsResponse());
            MemCmd cmd;
            if (pkt->isWrite() || pkt->isRead()) {
                // Read the data from memory to write into the block.
                // We'll write the data in the cache (i.e., a writeback cache)
                cmd = MemCmd::ReadReq;
            } else {
                panic("Unknown packet type in upgrade size");
            }

            // Create a new packet that is blockSize
            PacketPtr new_pkt = new Packet(pkt->req, cmd, blockSize);
            new_pkt->allocate();

            // Should now be block aligned
            assert(new_pkt->getAddr() == new_pkt->getBlockAddr(blockSize));

            // Save the old packet
            originalPacket = pkt;

            DPRINTF(SimpleCache, "forwarding packet\n");
            memPort.sendPacket(new_pkt);
        }
    }
}

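// Core lookup: find the block in the cache store and, on a hit, read or
// write the packet's data against the stored block. Returns true on a hit.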
bool
SimpleCache::accessFunctional(PacketPtr pkt)
{
    Addr block_addr = pkt->getBlockAddr(blockSize);
    auto it = cacheStore.find(block_addr);
    if (it != cacheStore.end()) {
        if (pkt->isWrite()) {
            // Write the data into the block in the cache
            pkt->writeDataToBlock(it->second, blockSize);
        } else if (pkt->isRead()) {
            // Read the data out of the cache block into the packet
            pkt->setDataFromBlock(it->second, blockSize);
        } else {
            panic("Unknown packet type!");
        }
        return true;
    }
    return false;
}

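// Insert a block returned from memory. If the cache is full, a victim is
// chosen at random and written back to memory before the new block is
// copied in.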
void
SimpleCache::insert(PacketPtr pkt)
{
    // The packet should be aligned.
    assert(pkt->getAddr() == pkt->getBlockAddr(blockSize));
    // The address should not be in the cache
    assert(cacheStore.find(pkt->getAddr()) == cacheStore.end());
    // The pkt should be a response
    assert(pkt->isResponse());

    if (cacheStore.size() >= capacity) {
        // Select a random entry to evict. This is a little convoluted since
        // we are using a std::unordered_map. See http://bit.ly/2hrnLP2
        int bucket, bucket_size;
        do {
            bucket = random_mt.random(0, (int)cacheStore.bucket_count() - 1);
        } while ( (bucket_size = cacheStore.bucket_size(bucket)) == 0 );
        auto block = std::next(cacheStore.begin(bucket),
                               random_mt.random(0, bucket_size - 1));

        DPRINTF(SimpleCache, "Removing addr %#x\n", block->first);

        // Write back the data.
        // Create a new request-packet pair
        RequestPtr req = std::make_shared<Request>(
            block->first, blockSize, 0, 0);

        PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
        new_pkt->dataDynamic(block->second); // This will be deleted later

        DPRINTF(SimpleCache, "Writing packet back %s\n", new_pkt->print());
        // Send the write to memory
        memPort.sendPacket(new_pkt);

        // Delete this entry
        cacheStore.erase(block->first);
    }

    DPRINTF(SimpleCache, "Inserting %s\n", pkt->print());
    DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), blockSize);

    // Allocate space for the cache block data
    uint8_t *data = new uint8_t[blockSize];

    // Insert the data and address into the cache store
    cacheStore[pkt->getAddr()] = data;

    // Write the data into the cache
    pkt->writeDataToBlock(data, blockSize);
}

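// The cache is transparent to the address map, so it simply exposes whatever
// ranges the memory side reports.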
AddrRangeList
SimpleCache::getAddrRanges() const
{
    DPRINTF(SimpleCache, "Sending new ranges\n");
    // Just use the same ranges as whatever is on the memory side.
    return memPort.getAddrRanges();
}

void
SimpleCache::sendRangeChange() const
{
    for (auto& port : cpuPorts) {
        port.sendRangeChange();
    }
}

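// Register this object's statistics. hitRatio is a formula stat, so it is
// computed from hits and misses when the statistics are dumped.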
void
SimpleCache::regStats()
{
    // If you don't do this you get errors about uninitialized stats.
    MemObject::regStats();

    hits.name(name() + ".hits")
        .desc("Number of hits")
        ;

    misses.name(name() + ".misses")
        .desc("Number of misses")
        ;

    missLatency.name(name() + ".missLatency")
        .desc("Ticks for misses to the cache")
        .init(16) // number of buckets
        ;

    hitRatio.name(name() + ".hitRatio")
        .desc("The ratio of hits to the total accesses to the cache")
        ;

    hitRatio = hits / (hits + misses);
}

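// Called by the generated SimpleCacheParams class (from SimpleCache.py) to
// build the C++ object when the Python configuration instantiates it.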
SimpleCache*
SimpleCacheParams::create()
{
    return new SimpleCache(this);
}