/*
 * Copyright (c) 2014, 2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "dev/virtio/base.hh"

#include "debug/VIO.hh"
#include "params/VirtIODeviceBase.hh"
#include "params/VirtIODummyDevice.hh"

VirtDescriptor::VirtDescriptor(PortProxy &_memProxy, VirtQueue &_queue,
                               Index descIndex)
    : memProxy(&_memProxy), queue(&_queue), _index(descIndex),
      desc{0, 0, 0, 0}
{
}

VirtDescriptor::VirtDescriptor(VirtDescriptor &&other) noexcept
{
    *this = std::forward<VirtDescriptor>(other);
}

VirtDescriptor::~VirtDescriptor() noexcept
{
}

VirtDescriptor &
VirtDescriptor::operator=(VirtDescriptor &&rhs) noexcept
{
    memProxy = std::move(rhs.memProxy);
    queue = std::move(rhs.queue);
    _index = std::move(rhs._index);
    desc = std::move(rhs.desc);

    return *this;
}

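// Refresh the local copy of this descriptor from the descriptor table in
// guest memory and convert it into host byte order with vtoh_legacy().
// The update is skipped if the queue has not been assigned an address yet.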
void
VirtDescriptor::update()
{
    const Addr vq_addr(queue->getAddress());
    // Check if the queue has been initialized yet
    if (vq_addr == 0)
        return;

    assert(_index < queue->getSize());
    const Addr desc_addr(vq_addr + sizeof(desc) * _index);
    vring_desc guest_desc;
    memProxy->readBlob(desc_addr, &guest_desc, sizeof(guest_desc));
    desc = vtoh_legacy(guest_desc);
    DPRINTF(VIO,
            "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
            "Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);
}

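// Update all descriptors in the chain starting at this descriptor. The
// walk stops at the end of the chain and panics if the chain loops back
// to its head.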
void
VirtDescriptor::updateChain()
{
    VirtDescriptor *desc(this);
    do {
        desc->update();
    } while ((desc = desc->next()) != NULL && desc != this);

    if (desc == this)
        panic("Loop in descriptor chain!\n");
}

void
VirtDescriptor::dump() const
{
    if (!DTRACE(VIO))
        return;

    DPRINTF(VIO, "Descriptor[%i]: "
            "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);

    if (isIncoming()) {
        uint8_t data[desc.len];
        read(0, data, desc.len);
        DDUMP(VIO, data, desc.len);
    }
}

void
VirtDescriptor::dumpChain() const
{
    if (!DTRACE(VIO))
        return;

    const VirtDescriptor *desc(this);
    do {
        desc->dump();
    } while ((desc = desc->next()) != NULL);
}

VirtDescriptor *
VirtDescriptor::next() const
{
    if (hasNext()) {
        return queue->getDescriptor(desc.next);
    } else {
        return NULL;
    }
}

void
VirtDescriptor::read(size_t offset, uint8_t *dst, size_t size) const
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::read: "
            "offset: %i, dst: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)dst, size);
    assert(size <= desc.len - offset);
    if (!isIncoming())
        panic("Trying to read from outgoing buffer\n");

    memProxy->readBlob(desc.addr + offset, dst, size);
}

void
VirtDescriptor::write(size_t offset, const uint8_t *src, size_t size)
{
    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::write: "
            "offset: %i, src: 0x%x, size: %i\n",
            this, desc.addr, desc.len, offset, (long)src, size);
    assert(size <= desc.len - offset);
    if (!isOutgoing())
        panic("Trying to write to incoming buffer\n");

    memProxy->writeBlob(desc.addr + offset, src, size);
}

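// Read size bytes starting offset bytes into the chain of incoming
// (driver-to-device) descriptors rooted at this descriptor. The walk stops
// at the end of the chain or at the first descriptor that is not incoming,
// and panics if the full size could not be read.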
void
VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
{
    const VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->read(offset, dst, chunk_size);
            dst += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);

    if (size != 0) {
        panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

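// Write size bytes into the descriptor chain rooted at this descriptor,
// beginning offset bytes into the chain. Panics if the chain ends before
// the full size has been written.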
void
VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
{
    VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->write(offset, src, chunk_size);
            src += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            offset -= desc->size();
        }
    } while ((desc = desc->next()) != NULL && size > 0);

    if (size != 0) {
        panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

size_t
VirtDescriptor::chainSize() const
{
    size_t size(0);
    const VirtDescriptor *desc(this);
    do {
        size += desc->size();
    } while ((desc = desc->next()) != NULL);

    return size;
}



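// Create a virtqueue of the requested size and pre-allocate one
// VirtDescriptor wrapper per entry in the descriptor table.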
VirtQueue::VirtQueue(PortProxy &proxy, uint16_t size)
    : _size(size), _address(0), memProxy(proxy),
      avail(proxy, size), used(proxy, size),
      _last_avail(0)
{
    descriptors.reserve(_size);
    for (int i = 0; i < _size; ++i)
        descriptors.emplace_back(proxy, *this, i);
}

void
VirtQueue::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(_address);
    SERIALIZE_SCALAR(_last_avail);
}

void
VirtQueue::unserialize(CheckpointIn &cp)
{
    Addr addr_in;

    paramIn(cp, "_address", addr_in);
    UNSERIALIZE_SCALAR(_last_avail);

    // Use the address setter to ensure that the ring buffer addresses
    // are updated as well.
    setAddress(addr_in);
}

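// Set the guest physical base address of the queue. The legacy virtio
// memory layout places the descriptor table at the base address, the
// available ring directly after it, and the used ring on the next
// ALIGN_SIZE boundary after the available ring.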
void
VirtQueue::setAddress(Addr address)
{
    const Addr addr_avail(address + _size * sizeof(struct vring_desc));
    const Addr addr_avail_end(addr_avail + sizeof(struct vring_avail) +
                              _size * sizeof(uint16_t));
    const Addr addr_used((addr_avail_end + sizeof(uint16_t) +
                          (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
    _address = address;
    avail.setAddress(addr_avail);
    used.setAddress(addr_used);
}

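// Fetch the next pending descriptor chain from the available ring, or
// return NULL if the guest has not made any new descriptors available
// since the last call.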
VirtDescriptor *
VirtQueue::consumeDescriptor()
{
    avail.read();
    DPRINTF(VIO, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
            _last_avail, avail.header.index,
            avail.ring[_last_avail % avail.ring.size()]);
    if (_last_avail == avail.header.index)
        return NULL;

    VirtDescriptor::Index index(avail.ring[_last_avail % avail.ring.size()]);
    ++_last_avail;

    VirtDescriptor *d(&descriptors[index]);
    d->updateChain();

    return d;
}

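// Hand a finished descriptor chain back to the guest by appending it to
// the used ring together with the number of bytes the device wrote into
// the chain.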
void
VirtQueue::produceDescriptor(VirtDescriptor *desc, uint32_t len)
{
    used.readHeader();
    DPRINTF(VIO, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
            desc->index(), len, used.header.index);

    struct vring_used_elem &e(used.ring[used.header.index % used.ring.size()]);
    e.id = desc->index();
    e.len = len;
    used.header.index += 1;
    used.write();
}

void
VirtQueue::dump() const
{
    if (!DTRACE(VIO))
        return;

    for (const VirtDescriptor &d : descriptors)
        d.dump();
}

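// Called when the guest kicks this queue. The default implementation
// drains the available ring and calls onNotifyDescriptor() for every
// pending descriptor chain.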
void
VirtQueue::onNotify()
{
    DPRINTF(VIO, "onNotify\n");

    // Consume all pending descriptors from the input queue.
    VirtDescriptor *d;
    while ((d = consumeDescriptor()) != NULL)
        onNotifyDescriptor(d);
}


VirtIODeviceBase::VirtIODeviceBase(Params *params, DeviceId id,
                                   size_t config_size, FeatureBits features)
    : SimObject(params),
      guestFeatures(0),
      deviceId(id), configSize(config_size), deviceFeatures(features),
      _deviceStatus(0), _queueSelect(0),
      transKick(NULL)
{
}


VirtIODeviceBase::~VirtIODeviceBase()
{
}

void
VirtIODeviceBase::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(guestFeatures);
    SERIALIZE_SCALAR(_deviceStatus);
    SERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->serializeSection(cp, csprintf("_queues.%i", i));
}

void
VirtIODeviceBase::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(guestFeatures);
    UNSERIALIZE_SCALAR(_deviceStatus);
    UNSERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->unserializeSection(cp, csprintf("_queues.%i", i));
}

void
VirtIODeviceBase::reset()
{
    _queueSelect = 0;
    guestFeatures = 0;
    _deviceStatus = 0;

    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->setAddress(0);
}

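// Transport-level notification (queue kick) from the guest. Forwards the
// notification to the queue with the given index after checking that the
// index refers to a registered queue.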
void
VirtIODeviceBase::onNotify(QueueID idx)
{
    DPRINTF(VIO, "onNotify: idx: %i\n", idx);
    if (idx >= _queues.size()) {
        panic("Guest tried to notify queue (%i), but only %i "
              "queues registered.\n",
              idx, _queues.size());
    }
    _queues[idx]->onNotify();
}

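// Record the feature bits the guest driver has accepted. Panics if the
// guest tries to enable a feature the device did not offer.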
void
VirtIODeviceBase::setGuestFeatures(FeatureBits features)
{
    DPRINTF(VIO, "Setting guest features: 0x%x\n", features);
    if (~deviceFeatures & features) {
        panic("Guest tried to enable unsupported features:\n"
              "Device features: 0x%x\n"
              "Requested features: 0x%x\n",
              deviceFeatures, features);
    }
    guestFeatures = features;
}


void
VirtIODeviceBase::setDeviceStatus(DeviceStatus status)
{
    _deviceStatus = status;
    DPRINTF(VIO, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
            status.acknowledge, status.driver, status.driver_ok,
            status.failed);
    if (status == 0)
        reset();
}

void
VirtIODeviceBase::readConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset);
}

void
VirtIODeviceBase::writeConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset);
}

void
VirtIODeviceBase::readConfigBlob(PacketPtr pkt, Addr cfgOffset,
                                 const uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config read out of bounds.\n");

    pkt->makeResponse();
    pkt->setData(const_cast<uint8_t *>(cfg) + cfgOffset);
}

void
VirtIODeviceBase::writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg)
{
    const unsigned size(pkt->getSize());

    if (cfgOffset + size > configSize)
        panic("Config write out of bounds.\n");

    pkt->makeResponse();
    pkt->writeData((uint8_t *)cfg + cfgOffset);
}


const VirtQueue &
VirtIODeviceBase::getCurrentQueue() const
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n",
              _queueSelect);

    return *_queues[_queueSelect];
}

VirtQueue &
VirtIODeviceBase::getCurrentQueue()
{
    if (_queueSelect >= _queues.size())
        panic("Guest tried to access non-existing VirtQueue (%i).\n",
              _queueSelect);

    return *_queues[_queueSelect];
}

void
VirtIODeviceBase::setQueueAddress(uint32_t address)
{
    getCurrentQueue().setAddress(address * VirtQueue::ALIGN_SIZE);
}

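// Return the guest physical address of the currently selected queue in
// units of ALIGN_SIZE, i.e. the inverse of setQueueAddress(). The address
// is expected to be ALIGN_SIZE aligned.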
uint32_t
VirtIODeviceBase::getQueueAddress() const
{
    Addr address(getCurrentQueue().getAddress());
    assert(!(address & ((1ULL << VirtQueue::ALIGN_BITS) - 1)));
    return address >> VirtQueue::ALIGN_BITS;
}

void
VirtIODeviceBase::registerQueue(VirtQueue &queue)
{
    _queues.push_back(&queue);
}


VirtIODummyDevice::VirtIODummyDevice(VirtIODummyDeviceParams *params)
    : VirtIODeviceBase(params, ID_INVALID, 0, 0)
{
}

VirtIODummyDevice *
VirtIODummyDeviceParams::create()
{
    return new VirtIODummyDevice(this);
}