CacheRecorder.cc (11793:ef606668d247 → 12749:223c83ed9979)
/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/CacheRecorder.hh"

#include "debug/RubyCacheTrace.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"

using namespace std;

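// Emit a one-line, human-readable summary of this trace record; this is
// the form the RubyCacheTrace DPRINTFs below print for each record.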
void
TraceRecord::print(ostream& out) const
{
    out << "[TraceRecord: Node, " << m_cntrl_id << ", "
        << m_data_address << ", " << m_pc_address << ", "
        << m_type << ", Time: " << m_time << "]";
}

CacheRecorder::CacheRecorder()
    : m_uncompressed_trace(NULL),
      m_uncompressed_trace_size(0),
      m_block_size_bytes(RubySystem::getBlockSizeBytes())
{
}

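// Playback constructor: takes ownership of a previously recorded
// (uncompressed) trace, along with a mapping from controller id to the
// Sequencer that will replay each record. A trace recorded with a larger
// block size than the current one is fine; the reverse is rejected below.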
CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
                             uint64_t uncompressed_trace_size,
                             std::vector<Sequencer*>& seq_map,
                             uint64_t block_size_bytes)
    : m_uncompressed_trace(uncompressed_trace),
      m_uncompressed_trace_size(uncompressed_trace_size),
      m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
      m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
    if (m_uncompressed_trace != NULL) {
        if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
            // Block sizes larger than when the trace was recorded are not
            // supported, as we cannot reliably turn accesses to smaller
            // blocks into larger ones.
            panic("Recorded cache block size (%d) < current block size (%d) !!",
                  m_block_size_bytes, RubySystem::getBlockSizeBytes());
        }
    }
}

CacheRecorder::~CacheRecorder()
{
    if (m_uncompressed_trace != NULL) {
        delete [] m_uncompressed_trace;
        m_uncompressed_trace = NULL;
    }
    m_seq_map.clear();
}

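// Issue a flush for the next recorded block, if any remain. Each call
// sends at most one FlushReq packet through the sequencer of the
// controller that owned the block when the trace was captured.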
void
CacheRecorder::enqueueNextFlushRequest()
{
    if (m_records_flushed < m_records.size()) {
        TraceRecord* rec = m_records[m_records_flushed];
        m_records_flushed++;
        auto req = std::make_shared<Request>(rec->m_data_address,
                                             m_block_size_bytes, 0,
                                             Request::funcMasterId);
        MemCmd::Command requestType = MemCmd::FlushReq;
        Packet *pkt = new Packet(req, requestType);

        Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id];
        assert(m_sequencer_ptr != NULL);
        m_sequencer_ptr->makeRequest(pkt);

        DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec);
    } else {
        DPRINTF(RubyCacheTrace, "Flushed all %d records\n", m_records_flushed);
    }
}

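// Replay the next record from the uncompressed trace, if any remain. A
// record's block may be larger than the current block size (see the check
// in the constructor), so it is reissued as one or more requests of the
// current block size.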
void
CacheRecorder::enqueueNextFetchRequest()
{
    if (m_bytes_read < m_uncompressed_trace_size) {
        TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace +
                                                   m_bytes_read);

        DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord);

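        // Walk the recorded block in strides of the current block size,
        // issuing one request per stride.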
        for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
             rec_bytes_read += RubySystem::getBlockSizeBytes()) {
            RequestPtr req;
            MemCmd::Command requestType;

            if (traceRecord->m_type == RubyRequestType_LD) {
                requestType = MemCmd::ReadReq;
                req = std::make_shared<Request>(
                    traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
            } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
                requestType = MemCmd::ReadReq;
                req = std::make_shared<Request>(
                    traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(),
                    Request::INST_FETCH, Request::funcMasterId);
            } else {
                requestType = MemCmd::WriteReq;
                req = std::make_shared<Request>(
                    traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
            }

            Packet *pkt = new Packet(req, requestType);
            pkt->dataStatic(traceRecord->m_data + rec_bytes_read);

            Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
            assert(m_sequencer_ptr != NULL);
            m_sequencer_ptr->makeRequest(pkt);
        }

        m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes);
        m_records_read++;
    } else {
        DPRINTF(RubyCacheTrace, "Fetched all %d records\n", m_records_read);
    }
}

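// Append one cache block to the trace. The fixed-size record header and
// the block's data are stored contiguously: the data bytes are copied
// into m_data, directly past the header fields.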
void
CacheRecorder::addRecord(int cntrl, Addr data_addr, Addr pc_addr,
                         RubyRequestType type, Tick time, DataBlock& data)
{
    TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
                                            m_block_size_bytes);
    rec->m_cntrl_id = cntrl;
    rec->m_time = time;
    rec->m_data_address = data_addr;
    rec->m_pc_address = pc_addr;
    rec->m_type = type;
    memcpy(rec->m_data, data.getData(0, m_block_size_bytes),
           m_block_size_bytes);

    m_records.push_back(rec);
}

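// Sort the records by time and pack them back-to-back into *buf, doubling
// the buffer whenever it runs out of space. Returns the number of bytes
// written; the caller owns the (possibly reallocated) buffer.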
uint64_t
CacheRecorder::aggregateRecords(uint8_t **buf, uint64_t total_size)
{
    std::sort(m_records.begin(), m_records.end(), compareTraceRecords);

    int size = m_records.size();
    uint64_t current_size = 0;
    int record_size = sizeof(TraceRecord) + m_block_size_bytes;

    for (int i = 0; i < size; ++i) {
        // Determine if we need to expand the buffer size
        if (current_size + record_size > total_size) {
            uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2];
            if (new_buf == NULL) {
                fatal("Unable to allocate buffer of size %d\n",
                      total_size * 2);
            }
            total_size = total_size * 2;
            uint8_t* old_buf = *buf;
            memcpy(new_buf, old_buf, current_size);
            *buf = new_buf;
            delete [] old_buf;
        }

        // Copy the current record into the buffer
        memcpy(&((*buf)[current_size]), m_records[i], record_size);
        current_size += record_size;

        free(m_records[i]);
        m_records[i] = NULL;
    }

    m_records.clear();
    return current_size;
}