base.cc revision 11793
/*
 * Copyright (c) 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "dev/virtio/base.hh"

#include "debug/VIO.hh"
#include "params/VirtIODeviceBase.hh"

VirtDescriptor::VirtDescriptor(PortProxy &_memProxy, VirtQueue &_queue,
                               Index descIndex)
    : memProxy(&_memProxy), queue(&_queue), _index(descIndex),
      desc{0, 0, 0, 0}
{
}

VirtDescriptor::VirtDescriptor(VirtDescriptor &&other) noexcept
{
    *this = std::forward<VirtDescriptor>(other);
}

VirtDescriptor::~VirtDescriptor() noexcept
{
}

VirtDescriptor &
VirtDescriptor::operator=(VirtDescriptor &&rhs) noexcept
{
    memProxy = std::move(rhs.memProxy);
    queue = std::move(rhs.queue);
    _index = std::move(rhs._index);
    desc = std::move(rhs.desc);

    return *this;
}

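// Re-read this descriptor's contents (address, length, flags, next)
// from the descriptor table in guest memory, converting from the
// guest's legacy virtio byte order.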
void
VirtDescriptor::update()
{
    const Addr vq_addr(queue->getAddress());
    // Check if the queue has been initialized yet
    if (vq_addr == 0)
        return;

    assert(_index < queue->getSize());
    const Addr desc_addr(vq_addr + sizeof(desc) * _index);
    vring_desc guest_desc;
    memProxy->readBlob(desc_addr, (uint8_t *)&guest_desc, sizeof(guest_desc));
    desc = vtoh_legacy(guest_desc);
    DPRINTF(VIO,
            "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
            "Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);
}

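// Refresh all descriptors in this chain from guest memory and panic if
// the chain loops back on itself.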
void
VirtDescriptor::updateChain()
{
    VirtDescriptor *desc(this);
    do {
        desc->update();
    } while ((desc = desc->next()) != NULL && desc != this);

    if (desc == this)
        panic("Loop in descriptor chain!\n");
}

void
VirtDescriptor::dump() const
{
    if (!DTRACE(VIO))
        return;

    DPRINTF(VIO, "Descriptor[%i]: "
            "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);

    if (isIncoming()) {
        uint8_t data[desc.len];
        read(0, data, desc.len);
        DDUMP(VIO, data, desc.len);
    }
}

void
VirtDescriptor::dumpChain() const
{
    if (!DTRACE(VIO))
        return;

    const VirtDescriptor *desc(this);
    do {
        desc->dump();
    } while ((desc = desc->next()) != NULL);
}

VirtDescriptor *
VirtDescriptor::next() const
{
    if (hasNext()) {
        return queue->getDescriptor(desc.next);
    } else {
        return NULL;
    }
}

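// Copy size bytes, starting at offset, from the guest buffer backing
// this descriptor into dst. Only valid for device-readable (incoming)
// descriptors.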
void
VirtDescriptor::read(size_t offset, uint8_t *dst, size_t size) const
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::read: offset: %i, dst: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)dst, size);
    assert(size <= desc.len - offset);
    if (!isIncoming())
        panic("Trying to read from outgoing buffer\n");

    memProxy->readBlob(desc.addr + offset, dst, size);
}

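// Copy size bytes from src into the guest buffer backing this
// descriptor, starting at offset. Only valid for device-writable
// (outgoing) descriptors.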
void
VirtDescriptor::write(size_t offset, const uint8_t *src, size_t size)
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::write: offset: %i, src: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)src, size);
    assert(size <= desc.len - offset);
    if (!isOutgoing())
        panic("Trying to write to incoming buffer\n");

    memProxy->writeBlob(desc.addr + offset, const_cast<uint8_t *>(src), size);
}

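// Read size bytes starting at offset from this descriptor chain,
// crossing descriptor boundaries as needed. Panics if the chain runs
// out of readable data before size bytes have been copied.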
void
VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
{
    const VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->read(offset, dst, chunk_size);
            dst += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);

    if (size != 0) {
        panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

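// Write size bytes starting at offset into this descriptor chain,
// crossing descriptor boundaries as needed. Panics if the chain is too
// small to hold the data.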
void
VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
{
    VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->write(offset, src, chunk_size);
            src += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && size > 0);

    if (size != 0) {
        panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

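// Total buffer size (in bytes) of this descriptor and all descriptors
// that follow it in the chain.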
size_t
VirtDescriptor::chainSize() const
{
    size_t size(0);
    const VirtDescriptor *desc(this);
    do {
        size += desc->size();
    } while ((desc = desc->next()) != NULL);

    return size;
}



VirtQueue::VirtQueue(PortProxy &proxy, uint16_t size)
    : _size(size), _address(0), memProxy(proxy),
      avail(proxy, size), used(proxy, size),
      _last_avail(0)
{
    descriptors.reserve(_size);
    for (int i = 0; i < _size; ++i)
        descriptors.emplace_back(proxy, *this, i);
}

void
VirtQueue::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(_address);
    SERIALIZE_SCALAR(_last_avail);
}

void
VirtQueue::unserialize(CheckpointIn &cp)
{
    Addr addr_in;

    paramIn(cp, "_address", addr_in);
    UNSERIALIZE_SCALAR(_last_avail);

    // Use the address setter to ensure that the ring buffer addresses
    // are updated as well.
    setAddress(addr_in);
}

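// Set the guest physical base address of this queue. The legacy virtio
// ring layout places the descriptor table at the base address,
// immediately followed by the available ring; the used ring starts at
// the next ALIGN_SIZE boundary after the available ring and its
// trailing 16-bit event field.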
void
VirtQueue::setAddress(Addr address)
{
    const Addr addr_avail(address + _size * sizeof(struct vring_desc));
    const Addr addr_avail_end(addr_avail + sizeof(struct vring_avail) +
                              _size * sizeof(uint16_t));
    const Addr addr_used((addr_avail_end + sizeof(uint16_t) +
                          (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
    _address = address;
    avail.setAddress(addr_avail);
    used.setAddress(addr_used);
}

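// Fetch the next descriptor chain the guest has made available, or
// NULL if the available ring is empty. The returned chain is refreshed
// from guest memory before being handed to the caller.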
VirtDescriptor *
VirtQueue::consumeDescriptor()
{
    avail.read();
    DPRINTF(VIO, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
            _last_avail, avail.header.index,
            avail.ring[_last_avail % used.ring.size()]);
    if (_last_avail == avail.header.index)
        return NULL;

    VirtDescriptor::Index index(avail.ring[_last_avail % used.ring.size()]);
    ++_last_avail;

    VirtDescriptor *d(&descriptors[index]);
    d->updateChain();

    return d;
}

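// Hand a finished descriptor chain back to the guest by appending an
// entry (descriptor index and number of bytes written) to the used
// ring and publishing the new used index.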
void
VirtQueue::produceDescriptor(VirtDescriptor *desc, uint32_t len)
{
    used.readHeader();
    DPRINTF(VIO, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
            desc->index(), len, used.header.index);

    struct vring_used_elem &e(used.ring[used.header.index % used.ring.size()]);
    e.id = desc->index();
    e.len = len;
    used.header.index += 1;
    used.write();
}

void
VirtQueue::dump() const
{
    if (!DTRACE(VIO))
        return;

    for (const VirtDescriptor &d : descriptors)
        d.dump();
}

void
VirtQueue::onNotify()
{
    DPRINTF(VIO, "onNotify\n");

    // Consume all pending descriptors from the input queue.
    VirtDescriptor *d;
    while ((d = consumeDescriptor()) != NULL)
        onNotifyDescriptor(d);
}

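/*
 * Illustrative sketch only (not part of the simulator): a device-side
 * queue typically overrides onNotifyDescriptor() to service one chain
 * at a time, roughly along these lines. EchoQueue, the 16-byte header
 * buffer, and parent.kick() are hypothetical names used purely to show
 * the chainRead()/chainWrite()/produceDescriptor() calling pattern.
 *
 * void
 * EchoQueue::onNotifyDescriptor(VirtDescriptor *desc)
 * {
 *     // Gather the request from the device-readable part of the chain.
 *     uint8_t hdr[16];
 *     desc->chainRead(0, hdr, sizeof(hdr));
 *
 *     // Write a reply into the device-writable part of the chain.
 *     desc->chainWrite(sizeof(hdr), hdr, sizeof(hdr));
 *
 *     // Hand the chain back to the guest and interrupt it.
 *     produceDescriptor(desc, sizeof(hdr));
 *     parent.kick();
 * }
 */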

VirtIODeviceBase::VirtIODeviceBase(Params *params, DeviceId id,
                                   size_t config_size, FeatureBits features)
    : SimObject(params),
      guestFeatures(0),
      deviceId(id), configSize(config_size), deviceFeatures(features),
      _deviceStatus(0), _queueSelect(0),
      transKick(NULL)
{
}


VirtIODeviceBase::~VirtIODeviceBase()
{
}

void
VirtIODeviceBase::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(guestFeatures);
    SERIALIZE_SCALAR(_deviceStatus);
    SERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->serializeSection(cp, csprintf("_queues.%i", i));
}

void
VirtIODeviceBase::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(guestFeatures);
    UNSERIALIZE_SCALAR(_deviceStatus);
    UNSERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->unserializeSection(cp, csprintf("_queues.%i", i));
}

void
VirtIODeviceBase::reset()
{
    _queueSelect = 0;
    guestFeatures = 0;
    _deviceStatus = 0;

    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->setAddress(0);
}

void
VirtIODeviceBase::onNotify(QueueID idx)
{
    DPRINTF(VIO, "onNotify: idx: %i\n", idx);
    if (idx >= _queues.size()) {
        panic("Guest tried to notify queue (%i), but only %i "
              "queues registered.\n",
              idx, _queues.size());
    }
    _queues[idx]->onNotify();
}

void
VirtIODeviceBase::setGuestFeatures(FeatureBits features)
{
    DPRINTF(VIO, "Setting guest features: 0x%x\n", features);
    if (~deviceFeatures & features) {
        panic("Guest tried to enable unsupported features:\n"
              "Device features: 0x%x\n"
              "Requested features: 0x%x\n",
              deviceFeatures, features);
    }
    guestFeatures = features;
}


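// Update the device status register. Writing zero to the status
// register resets the device, as required by the virtio specification.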
void
VirtIODeviceBase::setDeviceStatus(DeviceStatus status)
{
    _deviceStatus = status;
    DPRINTF(VIO, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
            status.acknowledge, status.driver, status.driver_ok, status.failed);
    if (status == 0)
        reset();
}

void
VirtIODeviceBase::readConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset);
}

void
VirtIODeviceBase::writeConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset);
}

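// Helper for devices with a flat configuration space: serve a config
// space read by copying the requested bytes from the device's config
// blob into the packet.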
void
VirtIODeviceBase::readConfigBlob(PacketPtr pkt, Addr cfgOffset, const uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config read out of bounds.\n");

    pkt->makeResponse();
    pkt->setData(const_cast<uint8_t *>(cfg) + cfgOffset);
}

void
VirtIODeviceBase::writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config write out of bounds.\n");

    pkt->makeResponse();
    pkt->writeData((uint8_t *)cfg + cfgOffset);
}


const VirtQueue &
VirtIODeviceBase::getCurrentQueue() const
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);

    return *_queues[_queueSelect];
}

VirtQueue &
VirtIODeviceBase::getCurrentQueue()
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);

    return *_queues[_queueSelect];
}

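// The queue address is exchanged with the guest in units of ALIGN_SIZE
// (i.e., as a page frame number in the legacy virtio transports), so it
// is scaled up when set and scaled back down when read.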
void
VirtIODeviceBase::setQueueAddress(uint32_t address)
{
    getCurrentQueue().setAddress(address * VirtQueue::ALIGN_SIZE);
}

uint32_t
VirtIODeviceBase::getQueueAddress() const
{
    Addr address(getCurrentQueue().getAddress());
    assert(!(address & (VirtQueue::ALIGN_SIZE - 1)));
    return address >> VirtQueue::ALIGN_BITS;
}

void
VirtIODeviceBase::registerQueue(VirtQueue &queue)
{
    _queues.push_back(&queue);
}