Sequencer.cc (6893:9cdf9b65d946 → 6899:f8057af86bf7)
1
2/*
3 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include "mem/ruby/libruby.hh"
31#include "mem/ruby/common/Global.hh"
32#include "mem/ruby/system/Sequencer.hh"
33#include "mem/ruby/system/System.hh"
34#include "mem/protocol/Protocol.hh"
35#include "mem/ruby/profiler/Profiler.hh"
36#include "mem/ruby/system/CacheMemory.hh"
37#include "mem/protocol/CacheMsg.hh"
38#include "mem/ruby/recorder/Tracer.hh"
39#include "mem/ruby/common/SubBlock.hh"
40#include "mem/protocol/Protocol.hh"
41#include "mem/gems_common/Map.hh"
42#include "mem/ruby/buffers/MessageBuffer.hh"
43#include "mem/ruby/slicc_interface/AbstractController.hh"
44#include "cpu/rubytest/RubyTester.hh"
45
46#include "params/RubySequencer.hh"
47
48//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
49
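// Returned by makeRequest() when a Locked_Write (store-conditional) finds the
// line no longer locked in the data cache.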
50#define LLSC_FAIL -2
51long int already = 0;
52
53Sequencer *
54RubySequencerParams::create()
55{
56 return new Sequencer(this);
57}
58
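// Constructed through RubySequencerParams::create(); the caches, outstanding
// request limit, deadlock threshold, and RubyTester flag all come from the
// RubySequencer parameter object.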
59Sequencer::Sequencer(const Params *p)
60 : RubyPort(p), deadlockCheckEvent(this)
61{
62 m_store_waiting_on_load_cycles = 0;
63 m_store_waiting_on_store_cycles = 0;
64 m_load_waiting_on_store_cycles = 0;
65 m_load_waiting_on_load_cycles = 0;
66
67 m_outstanding_count = 0;
68
69 m_max_outstanding_requests = 0;
70 m_deadlock_threshold = 0;
71 m_instCache_ptr = NULL;
72 m_dataCache_ptr = NULL;
73
74 m_instCache_ptr = p->icache;
75 m_dataCache_ptr = p->dcache;
76 m_max_outstanding_requests = p->max_outstanding_requests;
77 m_deadlock_threshold = p->deadlock_threshold;
77
78 m_usingRubyTester = p->using_ruby_tester;
79
80 assert(m_max_outstanding_requests > 0);
81 assert(m_deadlock_threshold > 0);
82 assert(m_instCache_ptr != NULL);
83 assert(m_dataCache_ptr != NULL);
84}
85
86Sequencer::~Sequencer() {
87
88}
89
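// Periodic deadlock check, driven by deadlockCheckEvent.  If any outstanding
// read or write request has been waiting at least m_deadlock_threshold cycles,
// the request is dumped and the simulation is aborted.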
90void Sequencer::wakeup() {
91 // Check for deadlock of any of the requests
92 Time current_time = g_eventQueue_ptr->getTime();
93
94 // Check across all outstanding requests
95 int total_outstanding = 0;
96
97 Vector<Address> keys = m_readRequestTable.keys();
98 for (int i=0; i<keys.size(); i++) {
99 SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
100 if (current_time - request->issue_time >= m_deadlock_threshold) {
101 WARN_MSG("Possible Deadlock detected");
102 WARN_EXPR(request);
103 WARN_EXPR(m_version);
104 WARN_EXPR(request->ruby_request.paddr);
105 WARN_EXPR(keys.size());
106 WARN_EXPR(current_time);
107 WARN_EXPR(request->issue_time);
108 WARN_EXPR(current_time - request->issue_time);
109 ERROR_MSG("Aborting");
110 }
111 }
112
113 keys = m_writeRequestTable.keys();
114 for (int i=0; i<keys.size(); i++) {
115 SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
116 if (current_time - request->issue_time >= m_deadlock_threshold) {
117 WARN_MSG("Possible Deadlock detected");
118 WARN_EXPR(request);
119 WARN_EXPR(m_version);
120 WARN_EXPR(current_time);
121 WARN_EXPR(request->issue_time);
122 WARN_EXPR(current_time - request->issue_time);
123 WARN_EXPR(keys.size());
124 ERROR_MSG("Aborting");
125 }
126 }
127 total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
128
129 assert(m_outstanding_count == total_outstanding);
130
131 if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
132 schedule(deadlockCheckEvent,
133 (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
134 }
135}
136
137void Sequencer::printStats(ostream & out) const {
138 out << "Sequencer: " << m_name << endl;
139 out << " store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
140 out << " store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
141 out << " load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
142 out << " load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
143}
144
145void Sequencer::printProgress(ostream& out) const{
146 /*
147 int total_demand = 0;
148 out << "Sequencer Stats Version " << m_version << endl;
149 out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
150 out << "---------------" << endl;
151 out << "outstanding requests" << endl;
152
153 Vector<Address> rkeys = m_readRequestTable.keys();
154 int read_size = rkeys.size();
155 out << "proc " << m_version << " Read Requests = " << read_size << endl;
156 // print the request table
157 for(int i=0; i < read_size; ++i){
158 SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
159 out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
160 total_demand++;
161 }
162
163 Vector<Address> wkeys = m_writeRequestTable.keys();
164 int write_size = wkeys.size();
165 out << "proc " << m_version << " Write Requests = " << write_size << endl;
166 // print the request table
167 for(int i=0; i < write_size; ++i){
168 CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
169 out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
170 if( request.getPrefetch() == PrefetchBit_No ){
171 total_demand++;
172 }
173 }
174
175 out << endl;
176
177 out << "Total Number Outstanding: " << m_outstanding_count << endl;
178 out << "Total Number Demand : " << total_demand << endl;
179 out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
180 out << endl;
181 out << endl;
182 */
183}
184
185void Sequencer::printConfig(ostream& out) const {
 186 out << "Sequencer config: " << m_name << endl;
187 out << " controller: " << m_controller->getName() << endl;
188 out << " version: " << m_version << endl;
189 out << " max_outstanding_requests: " << m_max_outstanding_requests << endl;
190 out << " deadlock_threshold: " << m_deadlock_threshold << endl;
191}
192
 193// Insert the request into the correct request table. Return true if
 194// the entry was already present.
195bool Sequencer::insertRequest(SequencerRequest* request) {
196 int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
197
198 assert(m_outstanding_count == total_outstanding);
199
200 // See if we should schedule a deadlock check
201 if (deadlockCheckEvent.scheduled() == false) {
202 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
203 }
204
205 Address line_addr(request->ruby_request.paddr);
206 line_addr.makeLineAddress();
207 if ((request->ruby_request.type == RubyRequestType_ST) ||
208 (request->ruby_request.type == RubyRequestType_RMW_Read) ||
209 (request->ruby_request.type == RubyRequestType_RMW_Write) ||
210 (request->ruby_request.type == RubyRequestType_Locked_Read) ||
211 (request->ruby_request.type == RubyRequestType_Locked_Write)) {
212 if (m_writeRequestTable.exist(line_addr)) {
213 m_writeRequestTable.lookup(line_addr) = request;
214 // return true;
215 assert(0); // drh5: isn't this an error? do you lose the initial request?
216 }
217 m_writeRequestTable.allocate(line_addr);
218 m_writeRequestTable.lookup(line_addr) = request;
219 m_outstanding_count++;
220 } else {
221 if (m_readRequestTable.exist(line_addr)) {
222 m_readRequestTable.lookup(line_addr) = request;
223 // return true;
224 assert(0); // drh5: isn't this an error? do you lose the initial request?
225 }
226 m_readRequestTable.allocate(line_addr);
227 m_readRequestTable.lookup(line_addr) = request;
228 m_outstanding_count++;
229 }
230
231 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
232
233 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
234 assert(m_outstanding_count == total_outstanding);
235
236 return false;
237}
238
239void Sequencer::removeRequest(SequencerRequest* srequest) {
240
241 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
242
243 const RubyRequest & ruby_request = srequest->ruby_request;
244 Address line_addr(ruby_request.paddr);
245 line_addr.makeLineAddress();
246 if ((ruby_request.type == RubyRequestType_ST) ||
247 (ruby_request.type == RubyRequestType_RMW_Read) ||
248 (ruby_request.type == RubyRequestType_RMW_Write) ||
249 (ruby_request.type == RubyRequestType_Locked_Read) ||
250 (ruby_request.type == RubyRequestType_Locked_Write)) {
251 m_writeRequestTable.deallocate(line_addr);
252 } else {
253 m_readRequestTable.deallocate(line_addr);
254 }
255 m_outstanding_count--;
256
257 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
258}
259
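// Completion path for stores (and RMW/locked accesses): removes the entry from
// the write request table, performs LL/SC and RMW bookkeeping (lock the line,
// block or unblock the mandatory queue), and then calls hitCallback().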
260void Sequencer::writeCallback(const Address& address, DataBlock& data) {
261
262 assert(address == line_address(address));
263 assert(m_writeRequestTable.exist(line_address(address)));
264
265 SequencerRequest* request = m_writeRequestTable.lookup(address);
266
267 removeRequest(request);
268
269 assert((request->ruby_request.type == RubyRequestType_ST) ||
270 (request->ruby_request.type == RubyRequestType_RMW_Read) ||
271 (request->ruby_request.type == RubyRequestType_RMW_Write) ||
272 (request->ruby_request.type == RubyRequestType_Locked_Read) ||
273 (request->ruby_request.type == RubyRequestType_Locked_Write));
274
275 if (request->ruby_request.type == RubyRequestType_Locked_Read) {
276 m_dataCache_ptr->setLocked(address, m_version);
277 }
278 else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
279 m_controller->blockOnQueue(address, m_mandatory_q_ptr);
280 }
281 else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
282 m_controller->unblock(address);
283 }
284
285 hitCallback(request, data);
286}
287
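// Completion path for loads and instruction fetches: removes the entry from
// the read request table and calls hitCallback().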
288void Sequencer::readCallback(const Address& address, DataBlock& data) {
289
290 assert(address == line_address(address));
291 assert(m_readRequestTable.exist(line_address(address)));
292
293 SequencerRequest* request = m_readRequestTable.lookup(address);
294 removeRequest(request);
295
296 assert((request->ruby_request.type == RubyRequestType_LD) ||
297 (request->ruby_request.type == RubyRequestType_RMW_Read) ||
298 (request->ruby_request.type == RubyRequestType_IFETCH));
299
300 hitCallback(request, data);
301}
302
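// Common completion path for reads and writes: touches the cache line's MRU
// state, profiles the miss latency, copies data between the DataBlock and the
// request buffer, optionally updates the RubyTester's subBlock, and finally
// notifies the requester through m_hit_callback before freeing the request.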
303void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
304 const RubyRequest & ruby_request = srequest->ruby_request;
305 Address request_address(ruby_request.paddr);
306 Address request_line_address(ruby_request.paddr);
307 request_line_address.makeLineAddress();
308 RubyRequestType type = ruby_request.type;
309 Time issued_time = srequest->issue_time;
310
311 // Set this cache entry to the most recently used
312 if (type == RubyRequestType_IFETCH) {
313 if (m_instCache_ptr->isTagPresent(request_line_address) )
314 m_instCache_ptr->setMRU(request_line_address);
315 } else {
316 if (m_dataCache_ptr->isTagPresent(request_line_address) )
317 m_dataCache_ptr->setMRU(request_line_address);
318 }
319
320 assert(g_eventQueue_ptr->getTime() >= issued_time);
321 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
322
323 // Profile the miss latency for all non-zero demand misses
324 if (miss_latency != 0) {
325 g_system_ptr->getProfiler()->missLatency(miss_latency, type);
326
327 if (Debug::getProtocolTrace()) {
328 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
329 "", "Done", "", int_to_string(miss_latency)+" cycles");
330 }
331 }
332 /*
333 if (request.getPrefetch() == PrefetchBit_Yes) {
334 return; // Ignore the prefetch
335 }
336 */
337
338 // update the data
339 if (ruby_request.data != NULL) {
340 if ((type == RubyRequestType_LD) ||
341 (type == RubyRequestType_IFETCH) ||
342 (type == RubyRequestType_RMW_Read)) {
343 memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
344 } else {
345 data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
346 }
347 }
348
349 //
350 // If using the RubyTester, update the RubyTester sender state's subBlock
 351 // with the received data. The tester will later access this state.
 352 // Note: RubyPort will access its sender state before the RubyTester.
353 //
354 if (m_usingRubyTester) {
355 //
356 // Since the hit callback func only takes a request id, we must iterate
357 // through the requests and update the packet's subBlock here.
358 // All this would be fixed if we could attach a M5 pkt pointer to the
359 // ruby request, however that change will break the libruby interface so
360 // we'll hold off on that for now.
361 //
362 RequestMap::iterator i = pending_cpu_requests.find(srequest->id);
363 if (i == pending_cpu_requests.end())
364 panic("could not find pending request %d\n", srequest->id);
365 RequestCookie *cookie = i->second;
366 Packet *pkt = cookie->pkt;
367
368 RubyTester::SenderState* testerSenderState;
369 testerSenderState = safe_cast<RubyTester::SenderState*>(pkt->senderState);
370 testerSenderState->subBlock->mergeFrom(data);
371 }
372
373 m_hit_callback(srequest->id);
374 delete srequest;
375}
376
 377// Returns 1 if the request can be issued; otherwise returns a libruby status
 377// code (LIBRUBY_ALIASED_REQUEST if a load or store to the same line is already
 377// outstanding, or LIBRUBY_BUFFER_FULL if too many requests are outstanding).
378int Sequencer::isReady(const RubyRequest& request) {
379 bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
380 bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
381 if ( is_outstanding_store ) {
382 if ((request.type == RubyRequestType_LD) ||
383 (request.type == RubyRequestType_IFETCH) ||
384 (request.type == RubyRequestType_RMW_Read)) {
385 m_store_waiting_on_load_cycles++;
386 } else {
387 m_store_waiting_on_store_cycles++;
388 }
389 return LIBRUBY_ALIASED_REQUEST;
390 } else if ( is_outstanding_load ) {
391 if ((request.type == RubyRequestType_ST) ||
392 (request.type == RubyRequestType_RMW_Write) ) {
393 m_load_waiting_on_store_cycles++;
394 } else {
395 m_load_waiting_on_load_cycles++;
396 }
397 return LIBRUBY_ALIASED_REQUEST;
398 }
399
400 if (m_outstanding_count >= m_max_outstanding_requests) {
401 return LIBRUBY_BUFFER_FULL;
402 }
403
404 return 1;
405}
406
407bool Sequencer::empty() const {
408 return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
409}
410
411
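// libruby entry point.  Returns a positive unique request id when the request
// is accepted and issued, LLSC_FAIL when a Locked_Write finds the line
// unlocked, or the (negative) status code from isReady() when the request
// aliases an outstanding one or the buffer is full.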
412int64_t Sequencer::makeRequest(const RubyRequest & request)
413{
414 assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
415 int ready = isReady(request);
416 if (ready > 0) {
417 int64_t id = makeUniqueRequestID();
418 SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
419 bool found = insertRequest(srequest);
420 if (!found) {
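      // LL/SC handling: the lock bit was set when the matching Locked_Read
      // completed (see writeCallback); if it has since been cleared, the
      // store-conditional fails.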
421 if (request.type == RubyRequestType_Locked_Write) {
422 // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
423 // ensuring that nothing comes between checking the flag and servicing the store
424 if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
425 return LLSC_FAIL;
426 }
427 else {
428 m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
429 }
430 }
431 issueRequest(request);
432
433 // TODO: issue hardware prefetches here
434 return id;
435 }
436 else {
437 assert(0);
438 return 0;
439 }
440 } else {
441 return ready;
442 }
443}
444
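// Translates the RubyRequest into a CacheMsg and enqueues it on the
// controller's mandatory queue after the appropriate I- or D-cache latency.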
445void Sequencer::issueRequest(const RubyRequest& request) {
446
 447 // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, and have SLICC use RubyRequest and subtypes natively
448 CacheRequestType ctype;
449 switch(request.type) {
450 case RubyRequestType_IFETCH:
451 ctype = CacheRequestType_IFETCH;
452 break;
453 case RubyRequestType_LD:
454 ctype = CacheRequestType_LD;
455 break;
456 case RubyRequestType_ST:
457 ctype = CacheRequestType_ST;
458 break;
459 case RubyRequestType_Locked_Read:
460 case RubyRequestType_Locked_Write:
461 ctype = CacheRequestType_ATOMIC;
462 break;
463 case RubyRequestType_RMW_Read:
464 ctype = CacheRequestType_ATOMIC;
465 break;
466 case RubyRequestType_RMW_Write:
467 ctype = CacheRequestType_ATOMIC;
468 break;
469 default:
470 assert(0);
471 }
472 AccessModeType amtype;
473 switch(request.access_mode){
474 case RubyAccessMode_User:
475 amtype = AccessModeType_UserMode;
476 break;
477 case RubyAccessMode_Supervisor:
478 amtype = AccessModeType_SupervisorMode;
479 break;
480 case RubyAccessMode_Device:
481 amtype = AccessModeType_UserMode;
482 break;
483 default:
484 assert(0);
485 }
486 Address line_addr(request.paddr);
487 line_addr.makeLineAddress();
488 CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);
489
490 if (Debug::getProtocolTrace()) {
491 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
492 "", "Begin", "", RubyRequestType_to_string(request.type));
493 }
494
495 if (g_system_ptr->getTracer()->traceEnabled()) {
496 g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc),
497 request.type, g_eventQueue_ptr->getTime());
498 }
499
 500 Time latency = 0; // initialized to zero; the real latency is set below
501
502 if (request.type == RubyRequestType_IFETCH)
503 latency = m_instCache_ptr->getLatency();
504 else
505 latency = m_dataCache_ptr->getLatency();
506
507 // Send the message to the cache controller
508 assert(latency > 0);
509
510 assert(m_mandatory_q_ptr != NULL);
511 m_mandatory_q_ptr->enqueue(msg, latency);
512}
513/*
514bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
515 AccessModeType access_mode,
516 int size, DataBlock*& data_ptr) {
517 if (type == CacheRequestType_IFETCH) {
518 return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
519 } else {
520 return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
521 }
522}
523*/
524
525void Sequencer::print(ostream& out) const {
526 out << "[Sequencer: " << m_version
527 << ", outstanding requests: " << m_outstanding_count;
528
529 out << ", read request table: " << m_readRequestTable
530 << ", write request table: " << m_writeRequestTable;
531 out << "]";
532}
533
 534// This can be called from setState whenever coherence permissions are upgraded;
 535// when invoked, coherence violations will be checked for the given block.
536void Sequencer::checkCoherence(const Address& addr) {
537#ifdef CHECK_COHERENCE
538 g_system_ptr->checkGlobalCoherenceInvariant(addr);
539#endif
540}
541