// Sequencer.cc (revision 6506:e9e7ca667575)

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

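// The Sequencer is the CPU-side entry point into Ruby: it accepts
// RubyRequests from the processor model, tracks them in per-line read and
// write request tables, forwards them to the cache controller through the
// mandatory queue, and fires the hit callback when the protocol completes
// them.
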
//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

#define LLSC_FAIL -2
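// Returned by makeRequest() when a Locked_Write (store-conditional) finds
// that its reservation has been cleared; see the isLocked() check below.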

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  m_servicing_atomic = -1;
  m_atomics_counter = 0;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // e.g. argv[i+1] == "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}

Sequencer::~Sequencer() {
}

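// Deadlock watchdog. Scheduled m_deadlock_threshold cycles after the first
// outstanding request is inserted; any request older than the threshold is
// treated as a likely deadlock and aborts the simulation, otherwise the
// check re-arms itself until the request tables drain.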
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the appropriate request table. Nominally returns
// true if an entry for the line was already present, but the asserts below
// abort on a duplicate, so in practice callers always see false.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

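// Completion path for requests in the write table, invoked by the cache
// controller once the store (or RMW / locked access) has permission.
// Updates the LL/SC and atomic bookkeeping before handing the data back
// through hitCallback().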
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
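  // New in this revision: bracket an atomic sequence at the controller.
  // set_atomic() on the read half and clear_atomic() on the write half
  // presumably hold the line at the L1 controller so the read-modify-write
  // pair completes without the block being stolen in between.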
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->set_atomic(address);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->clear_atomic();
  }

  hitCallback(request, data);
}

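// Completion path for requests in the read table (loads, ifetches, and the
// read half of an RMW); mirrors writeCallback().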
void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

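// Common completion tail: touch the MRU state, profile the miss latency,
// copy data between the DataBlock and the request buffer, then signal the
// requester via m_hit_callback and free the SequencerRequest.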
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns false if the sequencer cannot accept the request now: the
// outstanding-request limit is reached, a request to the same line is
// already in flight, or another processor's atomic sequence is in progress.
bool Sequencer::isReady(const RubyRequest& request) {
  // POLINA: if we are currently flushing the write buffer, Ruby reports
  // itself not ready here to simulate stalling of the front end.
  // Do we stall all the sequencers? For an atomic instruction - yes!
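  // Note: despite the name, this check is not side-effect free. For RMW
  // requests it also updates m_servicing_atomic and m_atomics_counter
  // below, so it appears intended to be called once per accepted request,
  // as makeRequest() does.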
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
    assert(m_atomics_counter > 0);
    return false;
  }
  else {
    if (request.type == RubyRequestType_RMW_Read) {
      if (m_servicing_atomic == -1) {
        assert(m_atomics_counter == 0);
        m_servicing_atomic = (int)request.proc_id;
      }
      else {
        assert(m_servicing_atomic == (int)request.proc_id);
      }
      m_atomics_counter++;
    }
    else if (request.type == RubyRequestType_RMW_Write) {
      assert(m_servicing_atomic == (int)request.proc_id);
      assert(m_atomics_counter > 0);
      m_atomics_counter--;
      if (m_atomics_counter == 0) {
        m_servicing_atomic = -1;
      }
    }
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

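// Entry point for the processor model. Returns a unique request id on
// acceptance, -1 if the sequencer is not ready, or LLSC_FAIL if a
// store-conditional has lost its reservation. A hypothetical caller
// (names here are illustrative, not part of this file):
//
//   int64_t id = sequencer->makeRequest(ruby_request);
//   if (id == LLSC_FAIL)  { /* store-conditional failed; retry the LL/SC */ }
//   else if (id == -1)    { /* sequencer busy; stall and retry next cycle */ }
//   else                  { /* wait for m_hit_callback(id) to fire */ }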
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
    }
    issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}

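// Translate the RubyRequest into the legacy CacheMsg / CacheRequestType /
// AccessModeType vocabulary that SLICC still speaks, then enqueue it on the
// controller's mandatory queue after the cache's access latency.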
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0; // initialized to an invalid value; the assert below requires > 0

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}