base.cc (10905:a6ca6831e775 -> 11321:02e930db812d)
/*
 * Copyright (c) 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "debug/VIO.hh"
#include "dev/virtio/base.hh"
#include "params/VirtIODeviceBase.hh"

VirtDescriptor::VirtDescriptor(PortProxy &_memProxy, VirtQueue &_queue,
                               Index descIndex)
    : memProxy(&_memProxy), queue(&_queue), _index(descIndex),
      desc{0, 0, 0, 0}
{
}

VirtDescriptor::VirtDescriptor(VirtDescriptor &&other) noexcept
{
    *this = std::forward<VirtDescriptor>(other);
}

VirtDescriptor::~VirtDescriptor() noexcept
{
}

VirtDescriptor &
VirtDescriptor::operator=(VirtDescriptor &&rhs) noexcept
{
    memProxy = std::move(rhs.memProxy);
    queue = std::move(rhs.queue);
    _index = std::move(rhs._index);
    desc = std::move(rhs.desc);

    return *this;
}

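// Re-read this descriptor from the descriptor table in guest memory and
// refresh the cached copy, converting it from guest to host byte order.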
void
VirtDescriptor::update()
{
    const Addr vq_addr(queue->getAddress());
    // Check if the queue has been initialized yet
    if (vq_addr == 0)
        return;

    assert(_index < queue->getSize());
    const Addr desc_addr(vq_addr + sizeof(desc) * _index);
    vring_desc guest_desc;
    memProxy->readBlob(desc_addr, (uint8_t *)&guest_desc, sizeof(guest_desc));
    desc = vtoh_legacy(guest_desc);
    DPRINTF(VIO,
            "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
            "Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);
}

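// Refresh the cached state of every descriptor in this chain. A chain that
// loops back on itself is illegal and triggers a panic.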
void
VirtDescriptor::updateChain()
{
    VirtDescriptor *desc(this);
    do {
        desc->update();
    } while ((desc = desc->next()) != NULL && desc != this);

    if (desc == this)
        panic("Loop in descriptor chain!\n");
}

void
VirtDescriptor::dump() const
{
    if (!DTRACE(VIO))
        return;

    DPRINTF(VIO, "Descriptor[%i]: "
            "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);

    if (isIncoming()) {
        uint8_t data[desc.len];
        read(0, data, desc.len);
        DDUMP(VIO, data, desc.len);
    }
}

void
VirtDescriptor::dumpChain() const
{
    if (!DTRACE(VIO))
        return;

    const VirtDescriptor *desc(this);
    do {
        desc->dump();
    } while ((desc = desc->next()) != NULL);
}

VirtDescriptor *
VirtDescriptor::next() const
{
    if (hasNext()) {
        return queue->getDescriptor(desc.next);
    } else {
        return NULL;
    }
}

void
VirtDescriptor::read(size_t offset, uint8_t *dst, size_t size) const
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::read: "
            "offset: %i, dst: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)dst, size);
    assert(size <= desc.len - offset);
    if (!isIncoming())
        panic("Trying to read from outgoing buffer\n");

    memProxy->readBlob(desc.addr + offset, dst, size);
}

void
VirtDescriptor::write(size_t offset, const uint8_t *src, size_t size)
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::write: "
            "offset: %i, src: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)src, size);
    assert(size <= desc.len - offset);
    if (!isOutgoing())
        panic("Trying to write to incoming buffer\n");

    memProxy->writeBlob(desc.addr + offset, const_cast<uint8_t *>(src), size);
}

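// Gather data from a chain of incoming (device-readable) descriptors,
// starting 'offset' bytes into the chain and copying 'size' bytes into
// 'dst'. Panics if the chain is too short to satisfy the request.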
void
VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
{
    const VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->read(offset, dst, chunk_size);
            dst += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);

    if (size != 0) {
        panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

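// Scatter 'size' bytes from 'src' into a descriptor chain, starting
// 'offset' bytes into the chain. Panics if the chain is too short.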
void
VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
{
    VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->write(offset, src, chunk_size);
            src += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && size > 0);

    if (size != 0) {
        panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

size_t
VirtDescriptor::chainSize() const
{
    size_t size(0);
    const VirtDescriptor *desc(this);
    do {
        size += desc->size();
    } while ((desc = desc->next()) != NULL);

    return size;
}



VirtQueue::VirtQueue(PortProxy &proxy, uint16_t size)
    : _size(size), _address(0), memProxy(proxy),
      avail(proxy, size), used(proxy, size),
      _last_avail(0)
{
    descriptors.reserve(_size);
    for (int i = 0; i < _size; ++i)
        descriptors.emplace_back(proxy, *this, i);
}

void
VirtQueue::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(_address);
    SERIALIZE_SCALAR(_last_avail);
}

void
VirtQueue::unserialize(CheckpointIn &cp)
{
    Addr addr_in;

    paramIn(cp, "_address", addr_in);
    UNSERIALIZE_SCALAR(_last_avail);

    // Use the address setter to ensure that the ring buffer addresses
    // are updated as well.
    setAddress(addr_in);
}

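// Set the base address of this queue and derive the addresses of the
// available and used rings from it, following the legacy virtio ring
// layout: the descriptor table is followed immediately by the available
// ring, and the used ring starts at the next ALIGN_SIZE boundary.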
void
VirtQueue::setAddress(Addr address)
{
    const Addr addr_avail(address + _size * sizeof(struct vring_desc));
    const Addr addr_avail_end(addr_avail + sizeof(struct vring_avail) +
                              _size * sizeof(uint16_t));
    const Addr addr_used((addr_avail_end + sizeof(uint16_t) +
                          (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
    _address = address;
    avail.setAddress(addr_avail);
    used.setAddress(addr_used);
}

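// Get the next pending descriptor chain from the available ring, or NULL
// if the guest has not made any new descriptors available. The cached
// state of the whole chain is refreshed before it is returned.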
VirtDescriptor *
VirtQueue::consumeDescriptor()
{
    avail.read();
    DPRINTF(VIO, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
            _last_avail, avail.header.index,
            avail.ring[_last_avail % used.ring.size()]);
    if (_last_avail == avail.header.index)
        return NULL;

    VirtDescriptor::Index index(avail.ring[_last_avail % used.ring.size()]);
    ++_last_avail;

    VirtDescriptor *d(&descriptors[index]);
    d->updateChain();

    return d;
}

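// Hand a descriptor chain back to the guest by appending it to the used
// ring, recording how many bytes the device wrote into the chain.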
void
VirtQueue::produceDescriptor(VirtDescriptor *desc, uint32_t len)
{
    used.readHeader();
    DPRINTF(VIO, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
            desc->index(), len, used.header.index);

    struct vring_used_elem &e(used.ring[used.header.index % used.ring.size()]);
    e.id = desc->index();
    e.len = len;
    used.header.index += 1;
    used.write();
}

void
VirtQueue::dump() const
{
    if (!DTRACE(VIO))
        return;

    for (const VirtDescriptor &d : descriptors)
        d.dump();
}

void
VirtQueue::onNotify()
{
    DPRINTF(VIO, "onNotify\n");

    // Consume all pending descriptors from the input queue.
    VirtDescriptor *d;
    while ((d = consumeDescriptor()) != NULL)
        onNotifyDescriptor(d);
}


VirtIODeviceBase::VirtIODeviceBase(Params *params, DeviceId id,
                                   size_t config_size, FeatureBits features)
    : SimObject(params),
      guestFeatures(0),
      deviceId(id), configSize(config_size), deviceFeatures(features),
      _deviceStatus(0), _queueSelect(0),
      transKick(NULL)
{
}


VirtIODeviceBase::~VirtIODeviceBase()
{
}

void
VirtIODeviceBase::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(guestFeatures);
    SERIALIZE_SCALAR(_deviceStatus);
    SERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->serializeSection(cp, csprintf("_queues.%i", i));
}

void
VirtIODeviceBase::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(guestFeatures);
    UNSERIALIZE_SCALAR(_deviceStatus);
    UNSERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->unserializeSection(cp, csprintf("_queues.%i", i));
}

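// Device reset: clear the negotiated features, the status register, the
// queue selector and all queue addresses, returning the device to its
// initial state.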
void
VirtIODeviceBase::reset()
{
    _queueSelect = 0;
    guestFeatures = 0;
    _deviceStatus = 0;

    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->setAddress(0);
}

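// Called when the guest kicks a queue through the transport layer.
// Forwards the notification to the corresponding VirtQueue.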
void
VirtIODeviceBase::onNotify(QueueID idx)
{
    DPRINTF(VIO, "onNotify: idx: %i\n", idx);
    if (idx >= _queues.size()) {
        panic("Guest tried to notify queue (%i), but only %i "
              "queues registered.\n",
              idx, _queues.size());
    }
    _queues[idx]->onNotify();
}

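// Feature negotiation: the guest may only acknowledge features that the
// device actually offers; anything else is treated as a driver bug and
// triggers a panic.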
void
VirtIODeviceBase::setGuestFeatures(FeatureBits features)
{
    DPRINTF(VIO, "Setting guest features: 0x%x\n", features);
    if (~deviceFeatures & features) {
        panic("Guest tried to enable unsupported features:\n"
              "Device features: 0x%x\n"
              "Requested features: 0x%x\n",
              deviceFeatures, features);
    }
    guestFeatures = features;
}


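// Update the device status register. Writing zero resets the device, as
// required by the virtio specification.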
void
VirtIODeviceBase::setDeviceStatus(DeviceStatus status)
{
    _deviceStatus = status;
    DPRINTF(VIO, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
            status.acknowledge, status.driver, status.driver_ok,
            status.failed);
    if (status == 0)
        reset();
}

void
VirtIODeviceBase::readConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset);
}

void
VirtIODeviceBase::writeConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset);
}

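// Helpers for devices with a flat configuration space: copy data between
// the device's config buffer and the request packet, checking that the
// access stays within configSize.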
void
VirtIODeviceBase::readConfigBlob(PacketPtr pkt, Addr cfgOffset,
                                 const uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config read out of bounds.\n");

    pkt->makeResponse();
    pkt->setData(const_cast<uint8_t *>(cfg) + cfgOffset);
}

void
VirtIODeviceBase::writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config write out of bounds.\n");

    pkt->makeResponse();
    pkt->writeData((uint8_t *)cfg + cfgOffset);
}


const VirtQueue &
VirtIODeviceBase::getCurrentQueue() const
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n",
              _queueSelect);

    return *_queues[_queueSelect];
}

VirtQueue &
VirtIODeviceBase::getCurrentQueue()
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n",
              _queueSelect);

    return *_queues[_queueSelect];
}

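// The legacy transport exposes the queue address as a page number in
// units of ALIGN_SIZE, so the value is scaled when written and the
// stored address must be ALIGN_SIZE-aligned when read back.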
void
VirtIODeviceBase::setQueueAddress(uint32_t address)
{
    getCurrentQueue().setAddress(address * VirtQueue::ALIGN_SIZE);
}

uint32_t
VirtIODeviceBase::getQueueAddress() const
{
    Addr address(getCurrentQueue().getAddress());
    assert(!(address & ((1ULL << VirtQueue::ALIGN_BITS) - 1)));
    return address >> VirtQueue::ALIGN_BITS;
}

void
VirtIODeviceBase::registerQueue(VirtQueue &queue)
{
    _queues.push_back(&queue);
}