--- Sequencer.cc (11033:9a0022457323)
+++ Sequencer.cc (11049:dfb0aa3f0649)
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met: redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer;

--- 303 unchanged lines hidden ---

     }

     markRemoved();
 }

 void
 Sequencer::invalidateSC(Addr address)
 {
-    RequestTable::iterator i = m_writeRequestTable.find(address);
-    if (i != m_writeRequestTable.end()) {
-        SequencerRequest* request = i->second;
-        // The controller has lost the coherence permissions, hence the lock
-        // on the cache line maintained by the cache should be cleared.
-        if (request->m_type == RubyRequestType_Store_Conditional) {
-            m_dataCache_ptr->clearLocked(address);
-        }
+    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+    // The controller has lost the coherence permissions, hence the lock
+    // on the cache line maintained by the cache should be cleared.
+    if (e && e->isLocked(m_version)) {
+        e->clearLocked();
     }
 }

 bool
 Sequencer::handleLlsc(Addr address, SequencerRequest* request)
 {
-    //
+    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+    if (!e)
+        return true;
+
     // The success flag indicates whether the LLSC operation was successful.
     // LL ops will always succeed, but SC may fail if the cache line is no
     // longer locked.
-    //
     bool success = true;
     if (request->m_type == RubyRequestType_Store_Conditional) {
-        if (!m_dataCache_ptr->isLocked(address, m_version)) {
+        if (!e->isLocked(m_version)) {
             //
             // For failed SC requests, indicate the failure to the cpu by
             // setting the extra data to zero.
             //
             request->pkt->req->setExtraData(0);
             success = false;
         } else {
             //
             // For successful SC requests, indicate the success to the cpu by
             // setting the extra data to one.
             //
             request->pkt->req->setExtraData(1);
         }
         //
         // Independent of success, all SC operations must clear the lock
         //
-        m_dataCache_ptr->clearLocked(address);
+        e->clearLocked();
     } else if (request->m_type == RubyRequestType_Load_Linked) {
         //
         // Note: To fully follow Alpha LLSC semantics, should the LL clear any
         // previously locked cache lines?
         //
-        m_dataCache_ptr->setLocked(address, m_version);
-    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
-               (m_dataCache_ptr->isLocked(address, m_version))) {
+        e->setLocked(m_version);
+    } else if (e->isLocked(m_version)) {
         //
         // Normal writes should clear the locked address
         //
-        m_dataCache_ptr->clearLocked(address);
+        e->clearLocked();
     }
     return success;
 }
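
Both hunks above replace the address-based lock calls on CacheMemory (m_dataCache_ptr->isLocked/setLocked/clearLocked) with calls on the AbstractCacheEntry returned by lookup(). As a reading aid only, and not part of the patch, here is a minimal self-contained sketch (toy types and names, not gem5 code) of the LL/SC decision tree that handleLlsc() implements:

#include <cassert>

// One cache line with a per-context load-link lock, standing in for
// AbstractCacheEntry::isLocked/setLocked/clearLocked.
struct ToyLine {
    int locked = -1;                           // context holding the lock, -1 = unlocked
    bool isLocked(int ctx) const { return locked == ctx; }
    void setLocked(int ctx) { locked = ctx; }
    void clearLocked() { locked = -1; }
};

enum class Op { LoadLinked, StoreConditional, PlainStore };

// Mirrors the decision tree in Sequencer::handleLlsc(); returns false only
// when a store-conditional finds the lock already gone.
bool access(ToyLine &line, Op op, int ctx)
{
    if (op == Op::StoreConditional) {
        bool success = line.isLocked(ctx);     // SC succeeds only if still locked
        line.clearLocked();                    // SC always clears the lock
        return success;
    } else if (op == Op::LoadLinked) {
        line.setLocked(ctx);                   // LL takes the lock
    } else if (line.isLocked(ctx)) {
        line.clearLocked();                    // an ordinary write breaks the lock
    }
    return true;
}

int main()
{
    ToyLine line;
    access(line, Op::LoadLinked, 0);
    bool sc1 = access(line, Op::StoreConditional, 0);  // LL then SC: succeeds
    access(line, Op::LoadLinked, 0);
    access(line, Op::PlainStore, 0);                   // intervening store clears the lock
    bool sc2 = access(line, Op::StoreConditional, 0);  // this SC fails
    assert(sc1 && !sc2);
    return 0;
}

An LL followed directly by an SC from the same context succeeds; an intervening ordinary store clears the lock and makes the SC fail, which the real code reports to the CPU by setting the request's extra data to 0 instead of 1. Losing coherence permissions has the same effect through invalidateSC() above.
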

 void
 Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                              const MachineType respondingMach,
                              bool isExternalHit, Cycles issuedTime,

--- 114 unchanged lines hidden ---

 void
 Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                        bool llscSuccess,
                        const MachineType mach, const bool externalHit,
                        const Cycles initialRequestTime,
                        const Cycles forwardRequestTime,
                        const Cycles firstResponseTime)
 {
499 warn_once("Replacement policy updates recently became the responsibility "
500 "of SLICC state machines. Make sure to setMRU() near callbacks "
501 "in .sm files!");
502
503 PacketPtr pkt = srequest->pkt;
504 Addr request_address(pkt->getAddr());
501 PacketPtr pkt = srequest->pkt;
502 Addr request_address(pkt->getAddr());
503 Addr request_line_address = makeLineAddress(pkt->getAddr());
505 RubyRequestType type = srequest->m_type;
506 Cycles issued_time = srequest->issue_time;
507
504 RubyRequestType type = srequest->m_type;
505 Cycles issued_time = srequest->issue_time;
506
507 // Set this cache entry to the most recently used
508 if (type == RubyRequestType_IFETCH) {
509 m_instCache_ptr->setMRU(request_line_address);
510 } else {
511 m_dataCache_ptr->setMRU(request_line_address);
512 }
513
508 assert(curCycle() >= issued_time);
509 Cycles total_latency = curCycle() - issued_time;
510
511 // Profile the latency for all demand accesses.
512 recordMissLatency(total_latency, type, mach, externalHit, issued_time,
513 initialRequestTime, forwardRequestTime,
514 firstResponseTime, curCycle());
515

--- 313 unchanged lines hidden ---
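
The hitCallback() hunk drops the Sequencer-side setMRU() calls and instead warns that replacement-state updates are now expected from the SLICC protocol (.sm) files. As a reading aid only, a toy sketch (assumed names, not gem5's CacheMemory implementation) of the bookkeeping a setMRU() call stands for:

#include <cassert>
#include <cstdint>
#include <vector>

// One set of an LRU-managed cache: setMRU() timestamps the touched way,
// victim() picks the stalest way on a miss.
struct ToySet {
    std::vector<uint64_t> lastTouched;   // per-way "last used" timestamp
    uint64_t now = 0;

    explicit ToySet(int ways) : lastTouched(ways, 0) {}

    void setMRU(int way) { lastTouched[way] = ++now; }

    int victim() const {
        int v = 0;
        for (size_t w = 1; w < lastTouched.size(); ++w)
            if (lastTouched[w] < lastTouched[v])
                v = static_cast<int>(w);
        return v;
    }
};

int main()
{
    ToySet set(4);
    set.setMRU(2);                 // a demand hit touches way 2
    assert(set.victim() != 2);     // the touched way is no longer the LRU victim
    return 0;
}

After this change, whatever handles the hit must perform that update, which the warn_once() reminder says should now happen near the callbacks in the .sm transition code rather than in hitCallback().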