Sequencer.cc (7805:f249937228b5 → 7823:dac01f14f20f)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/packet.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/libruby.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

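// Build the sequencer from its RubySequencer parameter object; the
// cache pointers and the outstanding-request limits come straight
// from the configuration.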
Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;
    m_usingRubyTester = p->using_ruby_tester;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
}

Sequencer::~Sequencer()
{
}

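// Deadlock-check event handler: scan both request tables and panic
// if any request has been outstanding longer than m_deadlock_threshold.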
void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: %d m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: %d m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the request table
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << "  controller: " << m_controller->getName() << endl
        << "  version: " << m_version << endl
        << "  max_outstanding_requests: " << m_max_outstanding_requests << endl
        << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(request->ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((request->ruby_request.type == RubyRequestType_ST) ||
        (request->ruby_request.type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.type == RubyRequestType_Locked_Read) ||
        (request->ruby_request.type == RubyRequestType_Locked_Write)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

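// Bookkeeping helper: called after an entry has been erased from one
// of the request tables to keep m_outstanding_count in sync.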
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

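// Remove an outstanding request, keyed by its cache-line address,
// from whichever table insertRequest() placed it in.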
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((ruby_request.type == RubyRequestType_ST) ||
        (ruby_request.type == RubyRequestType_RMW_Read) ||
        (ruby_request.type == RubyRequestType_RMW_Write) ||
        (ruby_request.type == RubyRequestType_Locked_Read) ||
        (ruby_request.type == RubyRequestType_Locked_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

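// Note: Locked_Read/Locked_Write model load-locked/store-conditional
// pairs (e.g. Alpha LDx_L/STx_C); the lock state lives in the data cache.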
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.type == RubyRequestType_Locked_Write) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.type == RubyRequestType_Locked_Read) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (m_dataCache_ptr->isLocked(address, m_version)) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

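// writeCallback: invoked by the cache controller when a store or
// atomic completes.  The short forms default the machine type and
// the per-hop timestamps.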
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_ST) ||
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.type == RubyRequestType_Locked_Read) ||
           (request->ruby_request.type == RubyRequestType_Locked_Write));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    bool success = handleLlsc(address, request);

    if (request->ruby_request.type == RubyRequestType_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

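// readCallback: same pattern as writeCallback, for completed loads
// and instruction fetches.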
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_LD) ||
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

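// Common completion path: update cache LRU state, profile the miss
// latency, copy data between the Ruby DataBlock and the requester,
// and hand the packet back to the port.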
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.paddr);
    Address request_line_address(ruby_request.paddr);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_eventQueue_ptr->getTime());
        }

        if (Debug::getProtocolTrace()) {
            if (success) {
                g_system_ptr->getProfiler()->
                    profileTransition("Seq", m_version,
                                      Address(ruby_request.paddr),
                                      "", "Done", "",
                                      csprintf("%d cycles", miss_latency));
            } else {
                g_system_ptr->getProfiler()->
                    profileTransition("Seq", m_version,
                                      Address(ruby_request.paddr),
                                      "", "SC_Failed", "",
                                      csprintf("%d cycles", miss_latency));
            }
        }
    }
#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_Read)) {

            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(), ruby_request.len),
                   ruby_request.len);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.len);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}

// Returns RequestStatus_Aliased if the sequencer already has a load or
// store outstanding to the same cache line, RequestStatus_BufferFull if
// too many requests are in flight, and RequestStatus_Ready otherwise.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        !!m_writeRequestTable.count(line_address(Address(request.paddr)));
    bool is_outstanding_load =
        !!m_readRequestTable.count(line_address(Address(request.paddr)));
    if (is_outstanding_store) {
        if ((request.type == RubyRequestType_LD) ||
            (request.type == RubyRequestType_IFETCH) ||
            (request.type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.type == RubyRequestType_ST) ||
            (request.type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

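// External entry point: validate the request, record it in the
// request tables, and issue it to the cache controller.  Typical
// caller sequence (sketch): build a RubyRequest from an M5 packet,
// call makeRequest(), and wait for the corresponding read/write
// callback if the status is RequestStatus_Issued.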
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(Address(request.paddr).getOffset() + request.len <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

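// Translate the RubyRequest into a CacheMsg and enqueue it on the
// mandatory queue with the appropriate cache access latency.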
void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: get rid of CacheMsg, CacheRequestType, and
    // AccessModeType, & have SLICC use RubyRequest and subtypes
    // natively
    CacheRequestType ctype;
    switch(request.type) {
      case RubyRequestType_IFETCH:
        ctype = CacheRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = CacheRequestType_LD;
        break;
      case RubyRequestType_ST:
        ctype = CacheRequestType_ST;
        break;
      case RubyRequestType_Locked_Read:
      case RubyRequestType_Locked_Write:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        ctype = CacheRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    AccessModeType amtype;
    switch(request.access_mode){
      case RubyAccessMode_User:
        amtype = AccessModeType_UserMode;
        break;
      case RubyAccessMode_Supervisor:
        amtype = AccessModeType_SupervisorMode;
        break;
      case RubyAccessMode_Device:
        amtype = AccessModeType_UserMode;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.paddr);
    line_addr.makeLineAddress();
    CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
        Address(request.pc), amtype, request.len, PrefetchBit_No,
        request.proc_id);

    if (Debug::getProtocolTrace()) {
        g_system_ptr->getProfiler()->
            profileTransition("Seq", m_version, Address(request.paddr),
                              "", "Begin", "",
                              RubyRequestType_to_string(request.type));
    }

    if (g_system_ptr->getTracer()->traceEnabled()) {
        g_system_ptr->getTracer()->
            traceRequest(this, line_addr, Address(request.pc),
                         request.type, g_eventQueue_ptr->getTime());
    }

    Time latency = 0;  // initialized to a null value

    if (request.type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                          AccessModeType access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == CacheRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

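// Pretty-print a request table as "[ addr=request ... ]"; used by
// Sequencer::print below.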
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded.  When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}