CacheRecorder.cc (10302:0e9e99e6369a → 10653:e3fc6bc7f97e)
/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "debug/RubyCacheTrace.hh"
#include "mem/ruby/system/CacheRecorder.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

void
TraceRecord::print(ostream& out) const
{
    out << "[TraceRecord: Node, " << m_cntrl_id << ", "
        << m_data_address << ", " << m_pc_address << ", "
        << m_type << ", Time: " << m_time << "]";
}

CacheRecorder::CacheRecorder()
    : m_uncompressed_trace(NULL),
      m_uncompressed_trace_size(0),
      m_block_size_bytes(RubySystem::getBlockSizeBytes())
{
}

CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
                             uint64_t uncompressed_trace_size,
                             std::vector<Sequencer*>& seq_map,
                             uint64_t block_size_bytes)
    : m_uncompressed_trace(uncompressed_trace),
      m_uncompressed_trace_size(uncompressed_trace_size),
      m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
      m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
    if (m_uncompressed_trace != NULL) {
        if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
            // Block sizes larger than when the trace was recorded are not
            // supported, as we cannot reliably turn accesses to smaller
            // blocks into larger ones.
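            // For example, a trace recorded with 128-byte blocks can be
            // replayed on a 64-byte system (enqueueNextFetchRequest() below
            // splits each record into two 64-byte requests), but a 64-byte
            // trace cannot fill 128-byte lines.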
            panic("Recorded cache block size (%d) < current block size (%d) !!",
                  m_block_size_bytes, RubySystem::getBlockSizeBytes());
        }
    }
}

CacheRecorder::~CacheRecorder()
{
    if (m_uncompressed_trace != NULL) {
        delete [] m_uncompressed_trace;
        m_uncompressed_trace = NULL;
    }
    m_seq_map.clear();
}

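// Issues at most one flush request per call; the driver (outside this file)
// is expected to call this again as each flush completes, until
// m_records_flushed reaches m_records.size().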
void
CacheRecorder::enqueueNextFlushRequest()
{
    if (m_records_flushed < m_records.size()) {
        TraceRecord* rec = m_records[m_records_flushed];
        m_records_flushed++;
        Request* req = new Request(rec->m_data_address,
                                   m_block_size_bytes, 0,
                                   Request::funcMasterId);
        MemCmd::Command requestType = MemCmd::FlushReq;
        Packet *pkt = new Packet(req, requestType);

        Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id];
        assert(m_sequencer_ptr != NULL);
        m_sequencer_ptr->makeRequest(pkt);

        DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec);
    }
}

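// Replays the next record from the uncompressed trace. Each call consumes
// one TraceRecord (plus its inline data block) and advances m_bytes_read;
// the caller repeats until the whole trace has been re-issued.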
void
CacheRecorder::enqueueNextFetchRequest()
{
    if (m_bytes_read < m_uncompressed_trace_size) {
        TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace +
                                                   m_bytes_read);

        DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord);

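        // A record may describe a block larger than the current system
        // block size (see the constructor check above); replay it as a
        // series of current-size requests.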
        for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
             rec_bytes_read += RubySystem::getBlockSizeBytes()) {
            Request* req = nullptr;
            MemCmd::Command requestType;

            if (traceRecord->m_type == RubyRequestType_LD) {
                requestType = MemCmd::ReadReq;
                req = new Request(traceRecord->m_data_address + rec_bytes_read,
                                  RubySystem::getBlockSizeBytes(), 0,
                                  Request::funcMasterId);
            } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
                requestType = MemCmd::ReadReq;
                req = new Request(traceRecord->m_data_address + rec_bytes_read,
                                  RubySystem::getBlockSizeBytes(),
                                  Request::INST_FETCH, Request::funcMasterId);
            } else {
                requestType = MemCmd::WriteReq;
                req = new Request(traceRecord->m_data_address + rec_bytes_read,
                                  RubySystem::getBlockSizeBytes(), 0,
                                  Request::funcMasterId);
            }

            Packet *pkt = new Packet(req, requestType);
            pkt->dataStatic(traceRecord->m_data + rec_bytes_read);

            Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
            assert(m_sequencer_ptr != NULL);
            m_sequencer_ptr->makeRequest(pkt);
        }

        m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes);
        m_records_read++;
    }
}

void
CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr,
                         const physical_address_t pc_addr,
                         RubyRequestType type, Tick time, DataBlock& data)
{
    TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
                                            m_block_size_bytes);
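    // TraceRecord is allocated with m_block_size_bytes of extra space so
    // that its trailing m_data member (declared in CacheRecorder.hh) holds
    // the cache block contents inline with the record.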
    rec->m_cntrl_id = cntrl;
    rec->m_time = time;
    rec->m_data_address = data_addr;
    rec->m_pc_address = pc_addr;
    rec->m_type = type;
    memcpy(rec->m_data, data.getData(0, m_block_size_bytes),
           m_block_size_bytes);

    m_records.push_back(rec);
}

uint64
CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
{
    std::sort(m_records.begin(), m_records.end(), compareTraceRecords);
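    // Sort records by timestamp (compareTraceRecords is defined alongside
    // TraceRecord) so the serialized trace is laid out in a deterministic
    // time order before being flattened into the buffer.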

    int size = m_records.size();
    uint64 current_size = 0;
    int record_size = sizeof(TraceRecord) + m_block_size_bytes;

    for (int i = 0; i < size; ++i) {
        // Determine if we need to expand the buffer size
        if (current_size + record_size > total_size) {
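            // Double the buffer; geometric growth keeps the total amount of
            // copying linear in the final trace size.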
            uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2];
            if (new_buf == NULL) {
                fatal("Unable to allocate buffer of size %d\n",
                      total_size * 2);
            }
            total_size = total_size * 2;
            uint8_t* old_buf = *buf;
            memcpy(new_buf, old_buf, current_size);
            *buf = new_buf;
            delete [] old_buf;
        }

        // Copy the current record into the buffer
        memcpy(&((*buf)[current_size]), m_records[i], record_size);
        current_size += record_size;

        free(m_records[i]);
        m_records[i] = NULL;
    }

    m_records.clear();
    return current_size;
}
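
// A minimal usage sketch, assuming the checkpointing flow that drives this
// class (the driver lives outside this file, so the surrounding names here
// are illustrative only):
//
//   // Saving: controllers record their valid lines, then the records are
//   // flattened into a single buffer to be written into the checkpoint.
//   CacheRecorder recorder;
//   // ... each controller calls recorder.addRecord(...) per cache line ...
//   uint8_t* buf = new uint8_t[initial_size];  // initial_size: caller's guess
//   uint64 trace_size = recorder.aggregateRecords(&buf, initial_size);
//
//   // Restoring: a CacheRecorder built around the trace read back from the
//   // checkpoint replays it, one record per enqueueNextFetchRequest() call,
//   // re-invoked as the previously issued requests complete.
//   CacheRecorder warmup(buf, trace_size, sequencers, block_size);
//   warmup.enqueueNextFetchRequest();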