Sequencer.cc (8717:5c253f1031d7 → 8828:e8fd0fc4a417)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

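// Handler for deadlockCheckEvent: any request that has been outstanding for
// more than m_deadlock_threshold cycles is treated as a likely protocol
// deadlock and aborts the simulation with a panic.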
void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << " store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << " store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << " load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << " load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    int i = 0;
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read, ++i) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    i = 0;
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write, ++i) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << " controller: " << m_controller->getName() << endl
        << " version: " << m_version << endl
        << " max_outstanding_requests: " << m_max_outstanding_requests << endl
        << " deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table. Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

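// Load-linked/store-conditional bookkeeping, built on the per-line lock
// state that CacheMemory keeps for the data cache.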
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));


    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

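// Common completion path for reads and writes: touch the LRU state of the
// relevant cache, record the miss latency, copy data between the Ruby
// DataBlock and the gem5 packet, and hand the packet back to the port (or
// to the cache recorder during warmup/cooldown).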
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    delete srequest;

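    // During cache warmup/cooldown the packets come from the cache recorder
    // rather than a CPU port, so they are simply freed here and the recorder
    // is asked for its next fetch/flush request.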
    if (g_system_ptr->m_warmup_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

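// Entry point for new memory requests from the port: classify the packet
// into primary/secondary Ruby request types, reserve a slot in the proper
// request table, and, if that succeeds, issue the request to the cache
// controller.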
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
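                // A read flagged with x86 StoreCheck is issued as an RMW
                // read with a store (ST) secondary type, so the protocol
                // sees it as a request that needs write permission.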
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Seq", "Begin", "", "",
             msg->getPhysicalAddress(),
             RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

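// Streaming helper used by Sequencer::print() below to dump the contents of
// the read and write request tables.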
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// this can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}