skewed_associative.cc (13219:454ecc63338d -> 13225:8d1621fc586e)
/*
 * Copyright (c) 2018 Inria
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Daniel Carvalho
 */

/**
 * @file
 * Definitions of a skewed associative indexing policy.
 */

#include "mem/cache/tags/indexing_policies/skewed_associative.hh"

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"

SkewedAssociative::SkewedAssociative(const Params *p)
    : BaseIndexingPolicy(p), msbShift(floorLog2(numSets) - 1)
{
    if (assoc > NUM_SKEWING_FUNCTIONS) {
        warn_once("Associativity higher than the number of skewing "
                  "functions. Expect sub-optimal skewing.\n");
    }

    // Check whether the sets are too big to do skewing. If bigger sets are
    // needed, the skewing functions must be rewritten to make good use of
    // the hashing function
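    // For instance, assuming 64-byte cache lines (setShift == 6), this check
    // allows up to 2^29 sets (6 + 2 * 29 == 64 bits)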
    panic_if(setShift + 2 * (msbShift + 1) > 64, "Unsupported number of bits "
             "for the skewing functions.");

    // We must have more than two sets, otherwise the MSB and LSB are the
    // same bit, and their xor will always be 0
    fatal_if(numSets <= 2, "The number of sets must be greater than 2");
}

Addr
SkewedAssociative::hash(const Addr addr) const
{
    // Get relevant bits
    const uint8_t lsb = bits<Addr>(addr, 0);
    const uint8_t msb = bits<Addr>(addr, msbShift);
    const uint8_t xor_bit = msb ^ lsb;

    // Shift off the LSB and set the new MSB as the xor of the old LSB and MSB
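    // For example, with msbShift == 2 (8 sets), hash(0b101) shifts off the
    // LSB (1) and sets the new MSB to 1 ^ 1 == 0, yielding 0b010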
    return insertBits<Addr, uint8_t>(addr >> 1, msbShift, xor_bit);
}

Addr
SkewedAssociative::dehash(const Addr addr) const
{
    // Get relevant bits. The original MSB is one bit below the current MSB
    // (which is the XOR bit). The original LSB can be retrieved by inverting
    // the xor that generated the XOR bit.
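    // Continuing the example above, dehash(0b010) reads the original MSB
    // from bit 1 (1) and the XOR bit from bit 2 (0), recovers the LSB as
    // 1 ^ 0 == 1, and rebuilds 0b101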
    const uint8_t msb = bits<Addr>(addr, msbShift - 1);
    const uint8_t xor_bit = bits<Addr>(addr, msbShift);
    const uint8_t lsb = msb ^ xor_bit;

    // Remove current MSB (XOR bit), shift left and add LSB back
    const Addr addr_no_msb = mbits<Addr>(addr, msbShift - 1, 0);
    return insertBits<Addr, uint8_t>(addr_no_msb << 1, 0, lsb);
}

Addr
SkewedAssociative::skew(const Addr addr, const uint32_t way) const
{
    // Assume an address of size A bits can be decomposed into
    // {addr3, addr2, addr1, addr0}, where:
    //   addr0 (M bits) = Block offset;
    //   addr1 (N bits) = Set bits in a conventional cache;
    //   addr3 (A - M - 2*N bits), addr2 (N bits) = Tag bits.
    // We use addr1 and addr2, as proposed in the original paper
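    // For example, with 8 sets (N == 3), addr1 is bits [2:0] and addr2 is
    // bits [5:3] of the incoming address, whose block offset has already
    // been shifted off by extractSet()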
    Addr addr1 = bits<Addr>(addr, msbShift, 0);
    const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);

    // Select and apply skewing function for the given way
    switch (way % NUM_SKEWING_FUNCTIONS) {
      case 0:
        addr1 = hash(addr1) ^ hash(addr2) ^ addr2;
        break;
      case 1:
        addr1 = hash(addr1) ^ hash(addr2) ^ addr1;
        break;
      case 2:
        addr1 = hash(addr1) ^ dehash(addr2) ^ addr2;
        break;
      case 3:
        addr1 = hash(addr1) ^ dehash(addr2) ^ addr1;
        break;
      case 4:
        addr1 = dehash(addr1) ^ hash(addr2) ^ addr2;
        break;
      case 5:
        addr1 = dehash(addr1) ^ hash(addr2) ^ addr1;
        break;
      case 6:
        addr1 = dehash(addr1) ^ dehash(addr2) ^ addr2;
        break;
      case 7:
        addr1 = dehash(addr1) ^ dehash(addr2) ^ addr1;
        break;
      default:
        panic("A skewing function has not been implemented for this way.");
    }

    // If we have more than 8 ways, just pile up extra hashes on top. This is
    // not the optimal solution, and can be improved by adding more skewing
    // functions to the selector above
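    // For example, way 9 uses skewing function 1 (9 % 8), followed by one
    // extra hash() pass (9 / 8 == 1)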
    for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
        addr1 = hash(addr1);
    }

    return addr1;
}

Addr
SkewedAssociative::deskew(const Addr addr, const uint32_t way) const
{
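    // deskew() undoes skew(): for a given way, deskew(skew(x, way), way) is
    // expected to recover the original set bits x
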
    // Get relevant bits of the addr
    Addr addr1 = bits<Addr>(addr, msbShift, 0);
    const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);

    // If we have more than NUM_SKEWING_FUNCTIONS ways, unpile the hashes
    if (way >= NUM_SKEWING_FUNCTIONS) {
        for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
            addr1 = dehash(addr1);
        }
    }

    // Select and apply skewing function for the given way
    switch (way % NUM_SKEWING_FUNCTIONS) {
      case 0:
        return dehash(addr1 ^ hash(addr2) ^ addr2);
      case 1:
        addr1 = addr1 ^ hash(addr2);
        for (int i = 0; i < msbShift; i++) {
            addr1 = hash(addr1);
        }
        return addr1;
      case 2:
        return dehash(addr1 ^ dehash(addr2) ^ addr2);
      case 3:
        addr1 = addr1 ^ dehash(addr2);
        for (int i = 0; i < msbShift; i++) {
            addr1 = hash(addr1);
        }
        return addr1;
      case 4:
        return hash(addr1 ^ hash(addr2) ^ addr2);
      case 5:
        addr1 = addr1 ^ hash(addr2);
        for (int i = 0; i <= msbShift; i++) {
            addr1 = hash(addr1);
        }
        return addr1;
      case 6:
        return hash(addr1 ^ dehash(addr2) ^ addr2);
      case 7:
        addr1 = addr1 ^ dehash(addr2);
        for (int i = 0; i <= msbShift; i++) {
            addr1 = hash(addr1);
        }
        return addr1;
      default:
        panic("A skewing function has not been implemented for this way.");
    }
}

uint32_t
SkewedAssociative::extractSet(const Addr addr, const uint32_t way) const
{
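    // Shift off the block offset, apply this way's skewing function to the
    // set bits, and mask the result to the number of sets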
    return skew(addr >> setShift, way) & setMask;
}

Addr
SkewedAssociative::regenerateAddr(const Addr tag,
                                  const ReplaceableEntry* entry) const
{
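    // Concatenate the tag to the stored (skewed) set index, deskew it back
    // to the original set bits, and rebuild the address from the tag and
    // those set bits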
    const Addr addr_set = (tag << (msbShift + 1)) | entry->getSet();
    return (tag << tagShift) |
           ((deskew(addr_set, entry->getWay()) & setMask) << setShift);
}

std::vector<ReplaceableEntry*>
SkewedAssociative::getPossibleEntries(const Addr addr) const
{
    std::vector<ReplaceableEntry*> entries;

    // Parse all ways
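    // Since each way has its own skewing function, the candidate entries
    // generally come from different sets, unlike in a set associative cache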
    for (uint32_t way = 0; way < assoc; ++way) {
        // Apply the skewing function to get the set, and get this way's
        // entry in it
        entries.push_back(sets[extractSet(addr, way)][way]);
    }

    return entries;
}

SkewedAssociative *
SkewedAssociativeParams::create()
{
    return new SkewedAssociative(this);
}