timing.cc — gem5 TimingSimpleCPU (revision 10653:e3fc6bc7f97e, updated from revision 10596:1eec33d2fc52)
1/*
2 * Copyright 2014 Google, Inc.
3 * Copyright (c) 2010-2013 ARM Limited
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2002-2005 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Steve Reinhardt
42 */
43
44#include "arch/locked_mem.hh"
45#include "arch/mmapped_ipr.hh"
46#include "arch/utility.hh"
47#include "base/bigint.hh"
48#include "config/the_isa.hh"
49#include "cpu/simple/timing.hh"
50#include "cpu/exetrace.hh"
51#include "debug/Config.hh"
52#include "debug/Drain.hh"
53#include "debug/ExecFaulting.hh"
54#include "debug/SimpleCPU.hh"
55#include "mem/packet.hh"
56#include "mem/packet_access.hh"
57#include "params/TimingSimpleCPU.hh"
58#include "sim/faults.hh"
59#include "sim/full_system.hh"
60#include "sim/system.hh"
61
62#include "debug/Mwait.hh"
63
64using namespace std;
65using namespace TheISA;
66
67void
68TimingSimpleCPU::init()
69{
70 BaseCPU::init();
71
72 // Initialise the ThreadContext's memory proxies
73 tcBase()->initMemProxies(tcBase());
74
75 if (FullSystem && !params()->switched_out) {
76 for (int i = 0; i < threadContexts.size(); ++i) {
77 ThreadContext *tc = threadContexts[i];
78 // initialize CPU, including PC
79 TheISA::initCPU(tc, _cpuId);
80 }
81 }
82}
83
84void
85TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
86{
87 pkt = _pkt;
88 cpu->schedule(this, t);
89}
90
91TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
92 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
93 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
94 fetchEvent(this), drainManager(NULL)
95{
96 _status = Idle;
97
98 system->totalNumInsts = 0;
99}
100
101
102
103TimingSimpleCPU::~TimingSimpleCPU()
104{
105}
106
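// Drain support: report "already drained" (return 0) if the CPU is
// switched out or quiescent; otherwise remember the DrainManager, make
// sure a fetch event is scheduled so the current instruction can finish,
// and return 1.  Completion is signalled later from tryCompleteDrain().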
107unsigned int
108TimingSimpleCPU::drain(DrainManager *drain_manager)
109{
110 assert(!drainManager);
111 if (switchedOut())
112 return 0;
113
114 if (_status == Idle ||
115 (_status == BaseSimpleCPU::Running && isDrained())) {
116 DPRINTF(Drain, "No need to drain.\n");
117 return 0;
118 } else {
119 drainManager = drain_manager;
120 DPRINTF(Drain, "Requesting drain: %s\n", pcState());
121
122 // The fetch event can become descheduled if a drain didn't
123 // succeed on the first attempt. We need to reschedule it if
124 // the CPU is waiting for a microcode routine to complete.
125 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
126 schedule(fetchEvent, clockEdge());
127
128 return 1;
129 }
130}
131
132void
133TimingSimpleCPU::drainResume()
134{
135 assert(!fetchEvent.scheduled());
136 assert(!drainManager);
137 if (switchedOut())
138 return;
139
140 DPRINTF(SimpleCPU, "Resume\n");
141 verifyMemoryMode();
142
143 assert(!threadContexts.empty());
144 if (threadContexts.size() > 1)
145 fatal("The timing CPU only supports one thread.\n");
146
147 if (thread->status() == ThreadContext::Active) {
148 schedule(fetchEvent, nextCycle());
149 _status = BaseSimpleCPU::Running;
150 notIdleFraction = 1;
151 } else {
152 _status = BaseSimpleCPU::Idle;
153 notIdleFraction = 0;
154 }
155}
156
157bool
158TimingSimpleCPU::tryCompleteDrain()
159{
160 if (!drainManager)
161 return false;
162
163 DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
164 if (!isDrained())
165 return false;
166
167 DPRINTF(Drain, "CPU done draining, processing drain event\n");
168 drainManager->signalDrainDone();
169 drainManager = NULL;
170
171 return true;
172}
173
174void
175TimingSimpleCPU::switchOut()
176{
177 BaseSimpleCPU::switchOut();
178
179 assert(!fetchEvent.scheduled());
180 assert(_status == BaseSimpleCPU::Running || _status == Idle);
181 assert(!stayAtPC);
182 assert(microPC() == 0);
183
184 updateCycleCounts();
185}
186
187
188void
189TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
190{
191 BaseSimpleCPU::takeOverFrom(oldCPU);
192
193 previousCycle = curCycle();
194}
195
196void
197TimingSimpleCPU::verifyMemoryMode() const
198{
199 if (!system->isTimingMode()) {
200 fatal("The timing CPU requires the memory system to be in "
201 "'timing' mode.\n");
202 }
203}
204
205void
206TimingSimpleCPU::activateContext(ThreadID thread_num)
207{
208 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
209
210 assert(thread_num == 0);
211 assert(thread);
212
213 assert(_status == Idle);
214
215 notIdleFraction = 1;
216 _status = BaseSimpleCPU::Running;
217
218 // kick things off by initiating the fetch of the next instruction
219 schedule(fetchEvent, clockEdge(Cycles(0)));
220}
221
222
223void
224TimingSimpleCPU::suspendContext(ThreadID thread_num)
225{
226 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
227
228 assert(thread_num == 0);
229 assert(thread);
230
231 if (_status == Idle)
232 return;
233
234 assert(_status == BaseSimpleCPU::Running);
235
236 // just change status to Idle... if status != Running,
237 // completeInst() will not initiate fetch of next instruction.
238
239 notIdleFraction = 0;
240 _status = Idle;
241}
242
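// Issue a translated read packet to the data port.  Memory-mapped IPR
// reads are completed locally through an IprEvent instead of going to
// the cache.  Returns true if the packet was handed off (or completed
// locally); returns false on a retry, in which case the packet stays in
// dcache_pkt until the port calls back.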
243bool
244TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
245{
246 RequestPtr req = pkt->req;
247
248 // We're about to issue a locked load, so tell the monitor
249 // to start caring about this address
250 if (pkt->isRead() && pkt->req->isLLSC()) {
251 TheISA::handleLockedRead(thread, pkt->req);
252 }
253 if (req->isMmappedIpr()) {
254 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
255 new IprEvent(pkt, this, clockEdge(delay));
256 _status = DcacheWaitResponse;
257 dcache_pkt = NULL;
258 } else if (!dcachePort.sendTimingReq(pkt)) {
259 _status = DcacheRetry;
260 dcache_pkt = pkt;
261 } else {
262 _status = DcacheWaitResponse;
263 // memory system takes ownership of packet
264 dcache_pkt = NULL;
265 }
266 return dcache_pkt == NULL;
267}
268
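// Called once address translation has completed: build a packet for the
// request and either complete it locally (Request::NO_ACCESS), issue it
// as a read, or issue it as a write after the LLSC / conditional-swap
// bookkeeping has been done.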
269void
270TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
271 bool read)
272{
273 PacketPtr pkt = buildPacket(req, read);
274 pkt->dataDynamic<uint8_t>(data);
275 if (req->getFlags().isSet(Request::NO_ACCESS)) {
276 assert(!dcache_pkt);
277 pkt->makeResponse();
278 completeDataAccess(pkt);
279 } else if (read) {
280 handleReadPacket(pkt);
281 } else {
282 bool do_access = true; // set to false to suppress the cache access
283
284 if (req->isLLSC()) {
285 do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
286 } else if (req->isCondSwap()) {
287 assert(res);
288 req->setExtraData(*res);
289 }
290
291 if (do_access) {
292 dcache_pkt = pkt;
293 handleWritePacket();
294 } else {
295 _status = DcacheWaitResponse;
296 completeDataAccess(pkt);
297 }
298 }
299}
300
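// As sendData(), but for an access that crosses a cache line: the two
// fragment packets are issued separately and reassembled into the main
// packet in completeDataAccess().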
301void
302TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
303 RequestPtr req, uint8_t *data, bool read)
304{
305 PacketPtr pkt1, pkt2;
306 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
307 if (req->getFlags().isSet(Request::NO_ACCESS)) {
308 assert(!dcache_pkt);
309 pkt1->makeResponse();
310 completeDataAccess(pkt1);
311 } else if (read) {
312 SplitFragmentSenderState * send_state =
313 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
314 if (handleReadPacket(pkt1)) {
315 send_state->clearFromParent();
316 send_state = dynamic_cast<SplitFragmentSenderState *>(
317 pkt2->senderState);
318 if (handleReadPacket(pkt2)) {
319 send_state->clearFromParent();
320 }
321 }
322 } else {
323 dcache_pkt = pkt1;
324 SplitFragmentSenderState * send_state =
325 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
326 if (handleWritePacket()) {
327 send_state->clearFromParent();
328 dcache_pkt = pkt2;
329 send_state = dynamic_cast<SplitFragmentSenderState *>(
330 pkt2->senderState);
331 if (handleWritePacket()) {
332 send_state->clearFromParent();
333 }
334 }
335 }
336}
337
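// Handle a fault returned by the data TLB: the access is abandoned, any
// trace record is discarded, and the fault is handed to advanceInst() so
// the architectural fault handler can run.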
338void
339TimingSimpleCPU::translationFault(const Fault &fault)
340{
341 // fault may be NoFault in cases where a fault is suppressed,
342 // for instance prefetches.
343 updateCycleCounts();
344
345 if (traceData) {
346 // Since there was a fault, we shouldn't trace this instruction.
347 delete traceData;
348 traceData = NULL;
349 }
350
351 postExecute();
352
353 advanceInst(fault);
354}
355
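// Allocate a read or write packet for the given request; the caller
// attaches the data buffer and sends the packet.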
356PacketPtr
357TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
358{
359 return read ? Packet::createRead(req) : Packet::createWrite(req);
360}
361
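// Build the two fragment packets for a cache-line-crossing access plus a
// main packet representing the whole access.  The sender-state objects
// link each fragment back to the main packet so the response can be
// reassembled.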
362void
363TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
364 RequestPtr req1, RequestPtr req2, RequestPtr req,
365 uint8_t *data, bool read)
366{
367 pkt1 = pkt2 = NULL;
368
369 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
370
371 if (req->getFlags().isSet(Request::NO_ACCESS)) {
372 pkt1 = buildPacket(req, read);
373 return;
374 }
375
376 pkt1 = buildPacket(req1, read);
377 pkt2 = buildPacket(req2, read);
378
379 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
380
381 pkt->dataDynamic<uint8_t>(data);
382 pkt1->dataStatic<uint8_t>(data);
383 pkt2->dataStatic<uint8_t>(data + req1->getSize());
384
385 SplitMainSenderState * main_send_state = new SplitMainSenderState;
386 pkt->senderState = main_send_state;
387 main_send_state->fragments[0] = pkt1;
388 main_send_state->fragments[1] = pkt2;
389 main_send_state->outstanding = 2;
390 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
391 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
392}
393
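// Initiate a timing-mode read: create the request, split it if it
// crosses a cache line, and start the (possibly two-part) data TLB
// translation.  The access continues in finishTranslation(); the NoFault
// return only means the access was successfully initiated.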
394Fault
395TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
396 unsigned size, unsigned flags)
397{
398 Fault fault;
399 const int asid = 0;
400 const ThreadID tid = 0;
401 const Addr pc = thread->instAddr();
402 unsigned block_size = cacheLineSize();
403 BaseTLB::Mode mode = BaseTLB::Read;
404
405 if (traceData) {
406 traceData->setAddr(addr);
407 }
408
409 RequestPtr req = new Request(asid, addr, size,
410 flags, dataMasterId(), pc, _cpuId, tid);
411
412 req->taskId(taskId());
413
414 Addr split_addr = roundDown(addr + size - 1, block_size);
415 assert(split_addr <= addr || split_addr - addr < block_size);
416
417 _status = DTBWaitResponse;
418 if (split_addr > addr) {
419 RequestPtr req1, req2;
420 assert(!req->isLLSC() && !req->isSwap());
421 req->splitOnVaddr(split_addr, req1, req2);
422
423 WholeTranslationState *state =
424 new WholeTranslationState(req, req1, req2, new uint8_t[size],
425 NULL, mode);
426 DataTranslation<TimingSimpleCPU *> *trans1 =
427 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
428 DataTranslation<TimingSimpleCPU *> *trans2 =
429 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
430
431 thread->dtb->translateTiming(req1, tc, trans1, mode);
432 thread->dtb->translateTiming(req2, tc, trans2, mode);
433 } else {
434 WholeTranslationState *state =
435 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
436 DataTranslation<TimingSimpleCPU *> *translation
437 = new DataTranslation<TimingSimpleCPU *>(this, state);
438 thread->dtb->translateTiming(req, tc, translation, mode);
439 }
440
441 return NoFault;
442}
443
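// Counterpart of handleReadPacket() for the packet currently held in
// dcache_pkt: memory-mapped IPR writes complete locally, otherwise try
// to send the packet and fall back to DcacheRetry if the port is busy.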
444bool
445TimingSimpleCPU::handleWritePacket()
446{
447 RequestPtr req = dcache_pkt->req;
448 if (req->isMmappedIpr()) {
449 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
450 new IprEvent(dcache_pkt, this, clockEdge(delay));
451 _status = DcacheWaitResponse;
452 dcache_pkt = NULL;
453 } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
454 _status = DcacheRetry;
455 } else {
456 _status = DcacheWaitResponse;
457 // memory system takes ownership of packet
458 dcache_pkt = NULL;
459 }
460 return dcache_pkt == NULL;
461}
462
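// Initiate a timing-mode write.  The data is copied into a fresh buffer
// (or zero-filled for cache-block-zero requests) so that it remains
// valid until translation and the access complete; otherwise this
// mirrors readMem().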
463Fault
464TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
465 Addr addr, unsigned flags, uint64_t *res)
466{
467 uint8_t *newData = new uint8_t[size];
468 const int asid = 0;
469 const ThreadID tid = 0;
470 const Addr pc = thread->instAddr();
471 unsigned block_size = cacheLineSize();
472 BaseTLB::Mode mode = BaseTLB::Write;
473
474 if (data == NULL) {
475 assert(flags & Request::CACHE_BLOCK_ZERO);
476 // This must be a cache block cleaning request
477 memset(newData, 0, size);
478 } else {
479 memcpy(newData, data, size);
480 }
481
482 if (traceData) {
483 traceData->setAddr(addr);
484 }
485
486 RequestPtr req = new Request(asid, addr, size,
487 flags, dataMasterId(), pc, _cpuId, tid);
488
489 req->taskId(taskId());
490
491 Addr split_addr = roundDown(addr + size - 1, block_size);
492 assert(split_addr <= addr || split_addr - addr < block_size);
493
494 _status = DTBWaitResponse;
495 if (split_addr > addr) {
496 RequestPtr req1, req2;
497 assert(!req->isLLSC() && !req->isSwap());
498 req->splitOnVaddr(split_addr, req1, req2);
499
500 WholeTranslationState *state =
501 new WholeTranslationState(req, req1, req2, newData, res, mode);
502 DataTranslation<TimingSimpleCPU *> *trans1 =
503 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
504 DataTranslation<TimingSimpleCPU *> *trans2 =
505 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
506
507 thread->dtb->translateTiming(req1, tc, trans1, mode);
508 thread->dtb->translateTiming(req2, tc, trans2, mode);
509 } else {
510 WholeTranslationState *state =
511 new WholeTranslationState(req, newData, res, mode);
512 DataTranslation<TimingSimpleCPU *> *translation =
513 new DataTranslation<TimingSimpleCPU *>(this, state);
514 thread->dtb->translateTiming(req, tc, translation, mode);
515 }
516
517 // Translation faults will be returned via finishTranslation()
518 return NoFault;
519}
520
521
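// Callback from the data TLB once translation (of both halves, for a
// split access) has finished: on a fault, release the buffers and
// requests and report the fault; otherwise send the access to the cache
// via sendData() or sendSplitData().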
522void
523TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
524{
525 _status = BaseSimpleCPU::Running;
526
527 if (state->getFault() != NoFault) {
528 if (state->isPrefetch()) {
529 state->setNoFault();
530 }
531 delete [] state->data;
532 state->deleteReqs();
533 translationFault(state->getFault());
534 } else {
535 if (!state->isSplit) {
536 sendData(state->mainReq, state->data, state->res,
537 state->mode == BaseTLB::Read);
538 } else {
539 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
540 state->data, state->mode == BaseTLB::Read);
541 }
542 }
543
544 delete state;
545}
546
547
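// Start fetching the next instruction: check for interrupts and PC
// events, then either begin an instruction TLB translation for a new
// fetch or, when the next microop comes from the microcode ROM or the
// current macroop, skip straight to completeIfetch(NULL).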
548void
549TimingSimpleCPU::fetch()
550{
551 DPRINTF(SimpleCPU, "Fetch\n");
552
553 if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
554 checkForInterrupts();
555 checkPcEventQueue();
556 }
557
558 // We must have just been suspended by a PC event
559 if (_status == Idle)
560 return;
561
562 TheISA::PCState pcState = thread->pcState();
563 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
564
565 if (needToFetch) {
566 _status = BaseSimpleCPU::Running;
567 Request *ifetch_req = new Request();
568 ifetch_req->taskId(taskId());
569 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
570 setupFetchRequest(ifetch_req);
571 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
572 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
573 BaseTLB::Execute);
574 } else {
575 _status = IcacheWaitResponse;
576 completeIfetch(NULL);
577
578 updateCycleCounts();
579 }
580}
581
582
583void
584TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
585 ThreadContext *tc)
586{
587 if (fault == NoFault) {
588 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
589 req->getVaddr(), req->getPaddr());
590 ifetch_pkt = new Packet(req, MemCmd::ReadReq);
591 ifetch_pkt->dataStatic(&inst);
592 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
593
594 if (!icachePort.sendTimingReq(ifetch_pkt)) {
595 // Need to wait for retry
596 _status = IcacheRetry;
597 } else {
598 // Need to wait for cache to respond
599 _status = IcacheWaitResponse;
600 // ownership of packet transferred to memory system
601 ifetch_pkt = NULL;
602 }
603 } else {
604 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
605 delete req;
606 // fetch fault: advance directly to next instruction (fault handler)
607 _status = BaseSimpleCPU::Running;
608 advanceInst(fault);
609 }
610
611 updateCycleCounts();
612}
613
614
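// Move on to the next instruction, or to the fault handler if a fault
// was returned.  Pending drain requests are completed here, and fetch()
// is only restarted while the CPU is still Running.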
615void
616TimingSimpleCPU::advanceInst(const Fault &fault)
617{
618 if (_status == Faulting)
619 return;
620
621 if (fault != NoFault) {
622 advancePC(fault);
623 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
624 reschedule(fetchEvent, clockEdge(), true);
625 _status = Faulting;
626 return;
627 }
628
629
630 if (!stayAtPC)
631 advancePC(fault);
632
633 if (tryCompleteDrain())
634 return;
635
636 if (_status == BaseSimpleCPU::Running) {
637 // kick off fetch of next instruction... callback from icache
638 // response will cause that instruction to be executed,
639 // keeping the CPU running.
640 fetch();
641 }
642}
643
644
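// The icache response (or a ROM/macroop "fetch" with a NULL packet) has
// arrived: decode and execute.  Memory references only initiate their
// access here and finish in completeDataAccess(); all other instructions
// execute to completion immediately.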
645void
646TimingSimpleCPU::completeIfetch(PacketPtr pkt)
647{
648 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
649 pkt->getAddr() : 0);
650
651 // received a response from the icache: execute the received
652 // instruction
653 assert(!pkt || !pkt->isError());
654 assert(_status == IcacheWaitResponse);
655
656 _status = BaseSimpleCPU::Running;
657
658 updateCycleCounts();
659
660 if (pkt)
661 pkt->req->setAccessLatency();
662
663
664 preExecute();
665 if (curStaticInst && curStaticInst->isMemRef()) {
666 // load or store: just send to dcache
667 Fault fault = curStaticInst->initiateAcc(this, traceData);
668
669 // If we're not running now the instruction will complete in a dcache
670 // response callback or the instruction faulted and has started an
671 // ifetch
672 if (_status == BaseSimpleCPU::Running) {
673 if (fault != NoFault && traceData) {
674 // If there was a fault, we shouldn't trace this instruction.
675 delete traceData;
676 traceData = NULL;
677 }
678
679 postExecute();
680 // @todo remove me after debugging with legion done
681 if (curStaticInst && (!curStaticInst->isMicroop() ||
682 curStaticInst->isFirstMicroop()))
683 instCnt++;
684 advanceInst(fault);
685 }
686 } else if (curStaticInst) {
687 // non-memory instruction: execute completely now
688 Fault fault = curStaticInst->execute(this, traceData);
689
690 // keep an instruction count
691 if (fault == NoFault)
692 countInst();
693 else if (traceData && !DTRACE(ExecFaulting)) {
694 delete traceData;
695 traceData = NULL;
696 }
697
698 postExecute();
699 // @todo remove me after debugging with legion done
700 if (curStaticInst && (!curStaticInst->isMicroop() ||
701 curStaticInst->isFirstMicroop()))
702 instCnt++;
703 advanceInst(fault);
704 } else {
705 advanceInst(NoFault);
706 }
707
708 if (pkt) {
709 delete pkt->req;
710 delete pkt;
711 }
712}
713
714void
715TimingSimpleCPU::IcachePort::ITickEvent::process()
716{
717 cpu->completeIfetch(pkt);
718}
719
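// Fetch responses are processed on the CPU's next clock edge, either
// immediately or via the port's tick event.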
720bool
721TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
722{
723 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
724 // delay processing of returned data until next CPU clock edge
725 Tick next_tick = cpu->clockEdge();
726
727 if (next_tick == curTick())
728 cpu->completeIfetch(pkt);
729 else
730 tickEvent.schedule(pkt, next_tick);
731
732 return true;
733}
734
735void
736TimingSimpleCPU::IcachePort::recvRetry()
737{
738 // we shouldn't get a retry unless we have a packet that we're
739 // waiting to transmit
740 assert(cpu->ifetch_pkt != NULL);
741 assert(cpu->_status == IcacheRetry);
742 PacketPtr tmp = cpu->ifetch_pkt;
743 if (sendTimingReq(tmp)) {
744 cpu->_status = IcacheWaitResponse;
745 cpu->ifetch_pkt = NULL;
746 }
747}
748
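// The dcache response has arrived: for split accesses, wait until both
// fragments are back and then operate on the reassembled main packet;
// finally let the instruction complete its access and advance.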
749void
750TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
751{
752 // received a response from the dcache: complete the load or store
753 // instruction
754 assert(!pkt->isError());
755 assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
756 pkt->req->getFlags().isSet(Request::NO_ACCESS));
757
758 pkt->req->setAccessLatency();
759
760 updateCycleCounts();
761
762 if (pkt->senderState) {
763 SplitFragmentSenderState * send_state =
764 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
765 assert(send_state);
766 delete pkt->req;
767 delete pkt;
768 PacketPtr big_pkt = send_state->bigPkt;
769 delete send_state;
770
771 SplitMainSenderState * main_send_state =
772 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
773 assert(main_send_state);
774 // Record the fact that this packet is no longer outstanding.
775 assert(main_send_state->outstanding != 0);
776 main_send_state->outstanding--;
777
778 if (main_send_state->outstanding) {
779 return;
780 } else {
781 delete main_send_state;
782 big_pkt->senderState = NULL;
783 pkt = big_pkt;
784 }
785 }
786
787 _status = BaseSimpleCPU::Running;
788
789 Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
790
791 // keep an instruction count
792 if (fault == NoFault)
793 countInst();
794 else if (traceData) {
795 // If there was a fault, we shouldn't trace this instruction.
796 delete traceData;
797 traceData = NULL;
798 }
799
800 delete pkt->req;
801 delete pkt;
802
803 postExecute();
804
805 advanceInst(fault);
806}
807
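// Charge the cycles elapsed since the last update to the numCycles stat
// and the corresponding probe point.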
808void
809TimingSimpleCPU::updateCycleCounts()
810{
811 const Cycles delta(curCycle() - previousCycle);
812
813 numCycles += delta;
814 ppCycles->notify(delta);
815
816 previousCycle = curCycle();
817}
818
819void
820TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
821{
822 // X86 ISA: Snooping an invalidation for monitor/mwait
824 if (cpu->getAddrMonitor()->doMonitor(pkt)) {
824 cpu->wakeup();
825 }
826 TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
827}
828
829void
830TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
831{
832 // X86 ISA: Snooping an invalidation for monitor/mwait
834 if (cpu->getAddrMonitor()->doMonitor(pkt)) {
834 cpu->wakeup();
835 }
836}
837
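// Data responses are also processed on the CPU's next clock edge.  If a
// tick event is already pending (possible for split accesses with a fast
// cache), the response is rejected and a retry is scheduled instead.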
838bool
839TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
840{
841 // delay processing of returned data until next CPU clock edge
842 Tick next_tick = cpu->clockEdge();
843
844 if (next_tick == curTick()) {
845 cpu->completeDataAccess(pkt);
846 } else {
847 if (!tickEvent.scheduled()) {
848 tickEvent.schedule(pkt, next_tick);
849 } else {
850 // In the case of a split transaction and a cache that is
851 // faster than the CPU, we could get two responses before
852 // next_tick expires.
853 if (!retryEvent.scheduled())
854 cpu->schedule(retryEvent, next_tick);
855 return false;
856 }
857 }
858
859 return true;
860}
861
862void
863TimingSimpleCPU::DcachePort::DTickEvent::process()
864{
865 cpu->completeDataAccess(pkt);
866}
867
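// The dcache asked us to retry: resend the pending packet and, for a
// split access, move on to the other fragment once the first one has
// been accepted.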
868void
869TimingSimpleCPU::DcachePort::recvRetry()
870{
871 // we shouldn't get a retry unless we have a packet that we're
872 // waiting to transmit
873 assert(cpu->dcache_pkt != NULL);
874 assert(cpu->_status == DcacheRetry);
875 PacketPtr tmp = cpu->dcache_pkt;
876 if (tmp->senderState) {
877 // This is a packet from a split access.
878 SplitFragmentSenderState * send_state =
879 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
880 assert(send_state);
881 PacketPtr big_pkt = send_state->bigPkt;
882
883 SplitMainSenderState * main_send_state =
884 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
885 assert(main_send_state);
886
887 if (sendTimingReq(tmp)) {
888 // If we were able to send without retrying, record that fact
889 // and try sending the other fragment.
890 send_state->clearFromParent();
891 int other_index = main_send_state->getPendingFragment();
892 if (other_index > 0) {
893 tmp = main_send_state->fragments[other_index];
894 cpu->dcache_pkt = tmp;
895 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
896 (big_pkt->isWrite() && cpu->handleWritePacket())) {
897 main_send_state->fragments[other_index] = NULL;
898 }
899 } else {
900 cpu->_status = DcacheWaitResponse;
901 // memory system takes ownership of packet
902 cpu->dcache_pkt = NULL;
903 }
904 }
905 } else if (sendTimingReq(tmp)) {
906 cpu->_status = DcacheWaitResponse;
907 // memory system takes ownership of packet
908 cpu->dcache_pkt = NULL;
909 }
910}
911
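// Event used to model the latency of a memory-mapped IPR access; when it
// fires, the access completes like a normal dcache response.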
912TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
913 Tick t)
914 : pkt(_pkt), cpu(_cpu)
915{
916 cpu->schedule(this, t);
917}
918
919void
920TimingSimpleCPU::IprEvent::process()
921{
922 cpu->completeDataAccess(pkt);
923}
924
925const char *
926TimingSimpleCPU::IprEvent::description() const
927{
928 return "Timing Simple CPU Delay IPR event";
929}
930
931
932void
933TimingSimpleCPU::printAddr(Addr a)
934{
935 dcachePort.printAddr(a);
936}
937
938
939////////////////////////////////////////////////////////////////////////
940//
941// TimingSimpleCPU Simulation Object
942//
943TimingSimpleCPU *
944TimingSimpleCPUParams::create()
945{
946 numThreads = 1;
947 if (!FullSystem && workload.size() != 1)
948 panic("only one workload allowed");
949 return new TimingSimpleCPU(this);
950}