/* base.cc revision 10388:a26a20060ba3 */
/*
 * Copyright (c) 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include <utility>
#include <vector>

#include "debug/VIO.hh"
#include "dev/virtio/base.hh"
#include "params/VirtIODeviceBase.hh"

44VirtDescriptor::VirtDescriptor(PortProxy &_memProxy, VirtQueue &_queue,
45                               Index descIndex)
46    : memProxy(&_memProxy), queue(&_queue), _index(descIndex)
47{
48}
49
50VirtDescriptor::VirtDescriptor(VirtDescriptor &&other) noexcept
51{
52    *this = std::forward<VirtDescriptor>(other);
53}
54
// Nothing to release: the descriptor only holds non-owning pointers
// into its parent queue and the memory proxy.
VirtDescriptor::~VirtDescriptor() noexcept
{
}

59VirtDescriptor &
60VirtDescriptor::operator=(VirtDescriptor &&rhs) noexcept
61{
62    memProxy = std::move(rhs.memProxy);
63    queue = std::move(rhs.queue);
64    _index = std::move(rhs._index);
65    desc = std::move(rhs.desc);
66
67    return *this;
68}
69
// Refresh the local shadow copy ('desc') of this descriptor from the
// descriptor table in guest memory.
void
VirtDescriptor::update()
{
    const Addr vq_addr(queue->getAddress());
    // Check if the queue has been initialized yet
    if (vq_addr == 0)
        return;

    assert(_index < queue->getSize());
    // Descriptors are stored back-to-back at the base of the virtqueue.
    const Addr desc_addr(vq_addr + sizeof(desc) * _index);
    vring_desc guest_desc;
    // Read the raw guest-side descriptor, then convert it to host
    // layout/byte order (legacy virtio conversion).
    memProxy->readBlob(desc_addr, (uint8_t *)&guest_desc, sizeof(guest_desc));
    desc = vtoh_legacy(guest_desc);
    DPRINTF(VIO,
            "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
            "Next: 0x%x\n",
            _index, desc.addr, desc.len, desc.flags, desc.next);
}

89void
90VirtDescriptor::updateChain()
91{
92    VirtDescriptor *desc(this);
93    do {
94        desc->update();
95    } while((desc = desc->next()) != NULL && desc != this);
96
97    if (desc == this)
98        panic("Loop in descriptor chain!\n");
99}
100
101void
102VirtDescriptor::dump() const
103{
104    if (!DTRACE(VIO))
105        return;
106
107    DPRINTF(VIO, "Descriptor[%i]: "
108            "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
109            _index, desc.addr, desc.len, desc.flags, desc.next);
110
111    if (isIncoming()) {
112        uint8_t data[desc.len];
113        read(0, data, desc.len);
114        DDUMP(VIO, data, desc.len);
115    }
116}
117
118void
119VirtDescriptor::dumpChain() const
120{
121    if (!DTRACE(VIO))
122        return;
123
124    const VirtDescriptor *desc(this);
125    do {
126        desc->dump();
127    } while((desc = desc->next()) != NULL);
128}
129
130VirtDescriptor *
131VirtDescriptor::next() const
132{
133    if (hasNext()) {
134        return queue->getDescriptor(desc.next);
135    } else {
136        return NULL;
137    }
138}
139
140void
141VirtDescriptor::read(size_t offset, uint8_t *dst, size_t size) const
142{
143    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::read: offset: %i, dst: 0x%x, size: %i\n",
144            this, desc.addr, desc.len, offset, (long)dst, size);
145    assert(size <= desc.len - offset);
146    if (!isIncoming())
147        panic("Trying to read from outgoing buffer\n");
148
149    memProxy->readBlob(desc.addr + offset, dst, size);
150}
151
152void
153VirtDescriptor::write(size_t offset, const uint8_t *src, size_t size)
154{
155    DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::write: offset: %i, src: 0x%x, size: %i\n",
156            this, desc.addr, desc.len, offset, (long)src, size);
157    assert(size <= desc.len - offset);
158    if (!isOutgoing())
159        panic("Trying to write to incoming buffer\n");
160
161    memProxy->writeBlob(desc.addr + offset, const_cast<uint8_t *>(src), size);
162}
163
// Read 'size' bytes from the chain starting at this descriptor, with
// 'offset' measured in bytes from the start of the chain. Only the
// leading run of incoming (guest-readable) descriptors is traversed;
// panics if the request cannot be satisfied.
void
VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
{
    const VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            // This descriptor overlaps the requested window; copy the
            // overlapping part and reset the offset for the rest of
            // the chain.
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->read(offset, dst, chunk_size);
            dst += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            // Still skipping descriptors that lie entirely before the
            // requested offset.
            offset -= desc->size();
        }
    } while((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);

    if (size != 0) {
        panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

// Write 'size' bytes into the chain starting at this descriptor, with
// 'offset' measured in bytes from the start of the chain. Unlike
// chainRead(), all descriptors are traversed: the offset typically
// skips past the chain's incoming descriptors before writing begins
// (write() itself panics on a non-outgoing target). Panics if the
// request cannot be satisfied.
void
VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
{
    VirtDescriptor *desc(this);
    const size_t full_size(size);
    do {
        if (offset < desc->size()) {
            // This descriptor overlaps the requested window; copy the
            // overlapping part and reset the offset for the rest of
            // the chain.
            const size_t chunk_size(std::min(desc->size() - offset, size));
            desc->write(offset, src, chunk_size);
            src += chunk_size;
            size -= chunk_size;
            offset = 0;
        } else {
            // Still skipping descriptors that lie entirely before the
            // requested offset.
            offset -= desc->size();
        }
    } while((desc = desc->next()) != NULL && size > 0);

    if (size != 0) {
        panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
              full_size, chainSize(), offset);
    }
}

210size_t
211VirtDescriptor::chainSize() const
212{
213    size_t size(0);
214    const VirtDescriptor *desc(this);
215    do {
216        size += desc->size();
217    } while((desc = desc->next()) != NULL);
218
219    return size;
220}
221
222
223
224VirtQueue::VirtQueue(PortProxy &proxy, uint16_t size)
225    : _size(size), _address(0), memProxy(proxy),
226      avail(proxy, size), used(proxy, size),
227      _last_avail(0)
228{
229    descriptors.reserve(_size);
230    for (int i = 0; i < _size; ++i)
231        descriptors.emplace_back(proxy, *this, i);
232}
233
// Checkpoint the queue. Only the base address and our consumer index
// need saving; the rings themselves live in guest memory.
void
VirtQueue::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(_address);
    SERIALIZE_SCALAR(_last_avail);
}

// Restore the queue from a checkpoint (counterpart of serialize()).
void
VirtQueue::unserialize(Checkpoint *cp, const std::string &section)
{
    Addr addr_in;

    paramIn(cp, section, "_address", addr_in);
    UNSERIALIZE_SCALAR(_last_avail);

    // Use the address setter to ensure that the ring buffer addresses
    // are updated as well.
    setAddress(addr_in);
}

// Set the guest-physical base address of this queue and derive the
// addresses of its rings from the legacy virtio memory layout:
// descriptor table at the base, immediately followed by the available
// ring, with the used ring starting at the next ALIGN_SIZE boundary.
void
VirtQueue::setAddress(Addr address)
{
    const Addr addr_avail(address + _size * sizeof(struct vring_desc));
    // The extra uint16_t beyond vring_avail + the ring entries is
    // presumably the trailing used_event field -- TODO confirm against
    // the legacy virtio ring definition.
    const Addr addr_avail_end(addr_avail + sizeof(struct vring_avail) +
                              _size * sizeof(uint16_t));
    const Addr addr_used((addr_avail_end + sizeof(uint16_t) +
                          (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
    _address = address;
    avail.setAddress(addr_avail);
    used.setAddress(addr_used);
}

267VirtDescriptor *
268VirtQueue::consumeDescriptor()
269{
270    avail.read();
271    DPRINTF(VIO, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
272            _last_avail, avail.header.index,
273            avail.ring[_last_avail % used.ring.size()]);
274    if (_last_avail == avail.header.index)
275        return NULL;
276
277    VirtDescriptor::Index index(avail.ring[_last_avail % used.ring.size()]);
278    ++_last_avail;
279
280    VirtDescriptor *d(&descriptors[index]);
281    d->updateChain();
282
283    return d;
284}
285
// Hand a finished descriptor chain back to the guest by appending it
// to the used ring. 'len' is the number of bytes the device wrote
// into the chain.
void
VirtQueue::produceDescriptor(VirtDescriptor *desc, uint32_t len)
{
    // Refresh the used ring header so we append at the guest-visible
    // producer index.
    used.readHeader();
    DPRINTF(VIO, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
            desc->index(), len, used.header.index);

    // Fill the next free slot with the chain head's index and byte
    // count, then publish it by bumping the used index and writing the
    // ring back to guest memory.
    struct vring_used_elem &e(used.ring[used.header.index % used.ring.size()]);
    e.id = desc->index();
    e.len = len;
    used.header.index += 1;
    used.write();
}

300void
301VirtQueue::dump() const
302{
303    if (!DTRACE(VIO))
304        return;
305
306    for (const VirtDescriptor &d : descriptors)
307        d.dump();
308}
309
310void
311VirtQueue::onNotify()
312{
313    DPRINTF(VIO, "onNotify\n");
314
315    // Consume all pending descriptors from the input queue.
316    VirtDescriptor *d;
317    while((d = consumeDescriptor()) != NULL)
318        onNotifyDescriptor(d);
319}
320
321
// Construct the common virtio device state.
// @param params SimObject parameters.
// @param id Virtio device ID exposed to the guest.
// @param config_size Size of the device config space in bytes.
// @param features Feature bits the device offers to the guest.
VirtIODeviceBase::VirtIODeviceBase(Params *params, DeviceId id,
                                   size_t config_size, FeatureBits features)
    : SimObject(params),
      guestFeatures(0),
      deviceId(id), configSize(config_size), deviceFeatures(features),
      _deviceStatus(0), _queueSelect(0),
      transKick(NULL)
{
}


// Queues are owned by the subclasses that register them; nothing to
// free here.
VirtIODeviceBase::~VirtIODeviceBase()
{
}

// Checkpoint the device-independent virtio state; each registered
// queue is serialized into its own named subsection.
void
VirtIODeviceBase::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(guestFeatures);
    // _deviceStatus is a bit field type; store its raw byte value.
    paramOut(os, "_deviceStatus", (uint8_t)_deviceStatus);
    SERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i) {
        nameOut(os, csprintf("%s._queues.%i", name(), i));
        _queues[i]->serialize(os);
    }
}

// Restore the device-independent virtio state (counterpart of
// serialize()); queue subsections are restored in registration order.
void
VirtIODeviceBase::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(guestFeatures);
    // Read the raw status byte and assign it through the bit field
    // type's conversion.
    uint8_t status;
    paramIn(cp, section, "_deviceStatus", status);
    _deviceStatus = status;
    UNSERIALIZE_SCALAR(_queueSelect);
    for (QueueID i = 0; i < _queues.size(); ++i)
        _queues[i]->unserialize(cp, csprintf("%s._queues.%i", section, i));
}

361void
362VirtIODeviceBase::reset()
363{
364    _queueSelect = 0;
365    guestFeatures = 0;
366    _deviceStatus = 0;
367
368    for (QueueID i = 0; i < _queues.size(); ++i)
369        _queues[i]->setAddress(0);
370}
371
372void
373VirtIODeviceBase::onNotify(QueueID idx)
374{
375    DPRINTF(VIO, "onNotify: idx: %i\n", idx);
376    if (idx >= _queues.size()) {
377        panic("Guest tried to notify queue (%i), but only %i "
378              "queues registered.\n",
379              idx, _queues.size());
380    }
381    _queues[idx]->onNotify();
382}
383
384void
385VirtIODeviceBase::setGuestFeatures(FeatureBits features)
386{
387    DPRINTF(VIO, "Setting guest features: 0x%x\n", features);
388    if (~deviceFeatures & features) {
389        panic("Guest tried to enable unsupported features:\n"
390              "Device features: 0x%x\n"
391              "Requested features: 0x%x\n",
392              deviceFeatures, features);
393    }
394    guestFeatures = features;
395}
396
397
// Update the device status register written by the guest. Writing
// zero is the virtio convention for requesting a device reset.
void
VirtIODeviceBase::setDeviceStatus(DeviceStatus status)
{
    _deviceStatus = status;
    DPRINTF(VIO, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
            status.acknowledge, status.driver, status.driver_ok, status.failed);
    if (status == 0)
        reset();
}

// Default config-space read handler. Devices that expose a config
// space override this; reaching it means the guest accessed config
// space on a device that has none.
void
VirtIODeviceBase::readConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset);
}

// Default config-space write handler; see readConfig().
void
VirtIODeviceBase::writeConfig(PacketPtr pkt, Addr cfgOffset)
{
    panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset);
}

420void
421VirtIODeviceBase::readConfigBlob(PacketPtr pkt, Addr cfgOffset, const uint8_t *cfg)
422{
423    const unsigned size(pkt->getSize());
424    pkt->allocate();
425
426    if (cfgOffset + size > configSize)
427        panic("Config read out of bounds.\n");
428
429    pkt->setData(const_cast<uint8_t *>(cfg) + cfgOffset);
430}
431
432void
433VirtIODeviceBase::writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg)
434{
435    const unsigned size(pkt->getSize());
436    pkt->allocate();
437
438    if (cfgOffset + size > configSize)
439        panic("Config write out of bounds.\n");
440
441    pkt->writeData((uint8_t *)cfg + cfgOffset);
442}
443
444
445const VirtQueue &
446VirtIODeviceBase::getCurrentQueue() const
447{
448    if (_queueSelect >= _queues.size())
449        panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);
450
451    return *_queues[_queueSelect];
452}
453
454VirtQueue &
455VirtIODeviceBase::getCurrentQueue()
456{
457    if (_queueSelect >= _queues.size())
458        panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);
459
460    return *_queues[_queueSelect];
461}
462
// Set the selected queue's base address. The guest writes the address
// as a page number (units of ALIGN_SIZE), per legacy virtio.
void
VirtIODeviceBase::setQueueAddress(uint32_t address)
{
    getCurrentQueue().setAddress(address * VirtQueue::ALIGN_SIZE);
}

469uint32_t
470VirtIODeviceBase::getQueueAddress() const
471{
472    Addr address(getCurrentQueue().getAddress());
473    assert(!(address & ((1 >> VirtQueue::ALIGN_BITS) - 1)));
474    return address >> VirtQueue::ALIGN_BITS;
475}
476
// Register a virtqueue with the device. Called by subclasses during
// construction; queue IDs are assigned in registration order and the
// queue object must outlive this device (no ownership is taken).
void
VirtIODeviceBase::registerQueue(VirtQueue &queue)
{
    _queues.push_back(&queue);
}