cache.cc (12720:8db2ee0c2cf6) cache.cc (12721:7f611e9412f0)
1/*
2 * Copyright (c) 2010-2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 1053 unchanged lines hidden (view full) ---

1062
1063 pkt->allocate();
1064 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
1065 cpu_pkt->print());
1066 return pkt;
1067}
1068
1069
1/*
2 * Copyright (c) 2010-2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 1053 unchanged lines hidden (view full) ---

1062
1063 pkt->allocate();
1064 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
1065 cpu_pkt->print());
1066 return pkt;
1067}
1068
1069
1070Cycles
1071Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
1072 PacketList &writebacks)
1073{
1074 // deal with the packets that go through the write path of
1075 // the cache, i.e. any evictions and writes
1076 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
1077 (pkt->req->isUncacheable() && pkt->isWrite())) {
1078 Cycles latency = ticksToCycles(memSidePort->sendAtomic(pkt));
1079
1080 // at this point, if the request was an uncacheable write
1081 // request, it has been satisfied by a memory below and the
1082 // packet carries the response back
1083 assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
1084 pkt->isResponse());
1085
1086 return latency;
1087 }
1088
1089 // only misses left
1090
1091 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
1092
1093 bool is_forward = (bus_pkt == nullptr);
1094
1095 if (is_forward) {
1096 // just forwarding the same request to the next level
1097 // no local cache operation involved
1098 bus_pkt = pkt;
1099 }
1100
1101 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
1102 bus_pkt->print());
1103
1104#if TRACING_ON
1105 CacheBlk::State old_state = blk ? blk->status : 0;
1106#endif
1107
1108 Cycles latency = ticksToCycles(memSidePort->sendAtomic(bus_pkt));
1109
1110 bool is_invalidate = bus_pkt->isInvalidate();
1111
1112 // We are now dealing with the response handling
1113 DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
1114 bus_pkt->print(), old_state);
1115
1116 // If packet was a forward, the response (if any) is already
1117 // in place in the bus_pkt == pkt structure, so we don't need
1118 // to do anything. Otherwise, use the separate bus_pkt to
1119 // generate response to pkt and then delete it.
1120 if (!is_forward) {
1121 if (pkt->needsResponse()) {
1122 assert(bus_pkt->isResponse());
1123 if (bus_pkt->isError()) {
1124 pkt->makeAtomicResponse();
1125 pkt->copyError(bus_pkt);
1126 } else if (pkt->cmd == MemCmd::WriteLineReq) {
1127 // note the use of pkt, not bus_pkt here.
1128
1129 // write-line request to the cache that promoted
1130 // the write to a whole line
1131 blk = handleFill(pkt, blk, writebacks,
1132 allocOnFill(pkt->cmd));
1133 assert(blk != NULL);
1134 is_invalidate = false;
1135 satisfyRequest(pkt, blk);
1136 } else if (bus_pkt->isRead() ||
1137 bus_pkt->cmd == MemCmd::UpgradeResp) {
1138 // we're updating cache state to allow us to
1139 // satisfy the upstream request from the cache
1140 blk = handleFill(bus_pkt, blk, writebacks,
1141 allocOnFill(pkt->cmd));
1142 satisfyRequest(pkt, blk);
1143 maintainClusivity(pkt->fromCache(), blk);
1144 } else {
1145 // we're satisfying the upstream request without
1146 // modifying cache state, e.g., a write-through
1147 pkt->makeAtomicResponse();
1148 }
1149 }
1150 delete bus_pkt;
1151 }
1152
1153 if (is_invalidate && blk && blk->isValid()) {
1154 invalidateBlock(blk);
1155 }
1156
1157 return latency;
1158}
1159
1070Tick
1071Cache::recvAtomic(PacketPtr pkt)
1072{
1073 // We are in atomic mode so we pay just for lookupLatency here.
1074 Cycles lat = lookupLatency;
1075
1076 // Forward the request if the system is in cache bypass mode.
1077 if (system->bypassCaches())

--- 35 unchanged lines hidden (view full) ---

1113 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
1114 writebacks.push_back(wb_pkt);
1115 pkt->setSatisfied();
1116 }
1117
1118 // handle writebacks resulting from the access here to ensure they
 1119    // logically precede anything happening below
1120 doWritebacksAtomic(writebacks);
1160Tick
1161Cache::recvAtomic(PacketPtr pkt)
1162{
1163 // We are in atomic mode so we pay just for lookupLatency here.
1164 Cycles lat = lookupLatency;
1165
1166 // Forward the request if the system is in cache bypass mode.
1167 if (system->bypassCaches())

--- 35 unchanged lines hidden (view full) ---

1203 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
1204 writebacks.push_back(wb_pkt);
1205 pkt->setSatisfied();
1206 }
1207
1208 // handle writebacks resulting from the access here to ensure they
 1209    // logically precede anything happening below
1210 doWritebacksAtomic(writebacks);
1211 assert(writebacks.empty());
1121
1122 if (!satisfied) {
1212
1213 if (!satisfied) {
1123 // MISS
1124
1125 // deal with the packets that go through the write path of
1126 // the cache, i.e. any evictions and writes
1127 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
1128 (pkt->req->isUncacheable() && pkt->isWrite())) {
1129 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
1130 return lat * clockPeriod();
1131 }
1132 // only misses left
1133
1134 PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
1135
1136 bool is_forward = (bus_pkt == nullptr);
1137
1138 if (is_forward) {
1139 // just forwarding the same request to the next level
1140 // no local cache operation involved
1141 bus_pkt = pkt;
1142 }
1143
1144 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
1145 bus_pkt->print());
1146
1147#if TRACING_ON
1148 CacheBlk::State old_state = blk ? blk->status : 0;
1149#endif
1150
1151 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
1152
1153 bool is_invalidate = bus_pkt->isInvalidate();
1154
1155 // We are now dealing with the response handling
1156 DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
1157 bus_pkt->print(), old_state);
1158
1159 // If packet was a forward, the response (if any) is already
1160 // in place in the bus_pkt == pkt structure, so we don't need
1161 // to do anything. Otherwise, use the separate bus_pkt to
1162 // generate response to pkt and then delete it.
1163 if (!is_forward) {
1164 if (pkt->needsResponse()) {
1165 assert(bus_pkt->isResponse());
1166 if (bus_pkt->isError()) {
1167 pkt->makeAtomicResponse();
1168 pkt->copyError(bus_pkt);
1169 } else if (pkt->cmd == MemCmd::WriteLineReq) {
1170 // note the use of pkt, not bus_pkt here.
1171
1172 // write-line request to the cache that promoted
1173 // the write to a whole line
1174 blk = handleFill(pkt, blk, writebacks,
1175 allocOnFill(pkt->cmd));
1176 assert(blk != NULL);
1177 is_invalidate = false;
1178 satisfyRequest(pkt, blk);
1179 } else if (bus_pkt->isRead() ||
1180 bus_pkt->cmd == MemCmd::UpgradeResp) {
1181 // we're updating cache state to allow us to
1182 // satisfy the upstream request from the cache
1183 blk = handleFill(bus_pkt, blk, writebacks,
1184 allocOnFill(pkt->cmd));
1185 satisfyRequest(pkt, blk);
1186 maintainClusivity(pkt->fromCache(), blk);
1187 } else {
1188 // we're satisfying the upstream request without
1189 // modifying cache state, e.g., a write-through
1190 pkt->makeAtomicResponse();
1191 }
1192 }
1193 delete bus_pkt;
1194 }
1195
1196 if (is_invalidate && blk && blk->isValid()) {
1197 invalidateBlock(blk);
1198 }
1214 lat += handleAtomicReqMiss(pkt, blk, writebacks);
1199 }
1200
1201 // Note that we don't invoke the prefetcher at all in atomic mode.
1202 // It's not clear how to do it properly, particularly for
1203 // prefetchers that aggressively generate prefetch candidates and
1204 // rely on bandwidth contention to throttle them; these will tend
1205 // to pollute the cache in atomic mode since there is no bandwidth
1206 // contention. If we ever do want to enable prefetching in atomic

--- 1673 unchanged lines hidden ---
1215 }
1216
1217 // Note that we don't invoke the prefetcher at all in atomic mode.
1218 // It's not clear how to do it properly, particularly for
1219 // prefetchers that aggressively generate prefetch candidates and
1220 // rely on bandwidth contention to throttle them; these will tend
1221 // to pollute the cache in atomic mode since there is no bandwidth
1222 // contention. If we ever do want to enable prefetching in atomic

--- 1673 unchanged lines hidden ---