summaryrefslogtreecommitdiff
path: root/middleware/multicore/open-amp/virtio
diff options
context:
space:
mode:
authorStefan Agner <stefan.agner@toradex.com>2016-01-12 14:06:54 -0800
committerStefan Agner <stefan.agner@toradex.com>2016-01-12 14:06:54 -0800
commita57cc2c988482010061b9e68344fdf1969889763 (patch)
tree5c050337492ce27c09b47421b123980b5a79f8d9 /middleware/multicore/open-amp/virtio
initial commit, FreeRTOS_BSP_1.0.0_iMX7D
Diffstat (limited to 'middleware/multicore/open-amp/virtio')
-rw-r--r--middleware/multicore/open-amp/virtio/virtio.c96
-rw-r--r--middleware/multicore/open-amp/virtio/virtio.h151
-rw-r--r--middleware/multicore/open-amp/virtio/virtio_ring.h165
-rw-r--r--middleware/multicore/open-amp/virtio/virtqueue.c693
-rw-r--r--middleware/multicore/open-amp/virtio/virtqueue.h227
5 files changed, 1332 insertions, 0 deletions
diff --git a/middleware/multicore/open-amp/virtio/virtio.c b/middleware/multicore/open-amp/virtio/virtio.c
new file mode 100644
index 0000000..b2de79e
--- /dev/null
+++ b/middleware/multicore/open-amp/virtio/virtio.c
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "virtio.h"
+
+static const char *virtio_feature_name(unsigned long feature, struct virtio_feature_desc *);
+
//TODO : This structure may change depending on the types of devices we support.
/* Maps VirtIO device IDs to printable names.  Terminated by a { 0, NULL }
 * sentinel entry; searched linearly by virtio_dev_name(). */
static struct virtio_ident {
    unsigned short devid;   /* VIRTIO_ID_* value (see virtio.h) */
    const char *name;       /* human-readable device name */
} virtio_ident_table[] = {
    { VIRTIO_ID_NETWORK, "Network" },
    { VIRTIO_ID_BLOCK, "Block" },
    { VIRTIO_ID_CONSOLE, "Console" },
    { VIRTIO_ID_ENTROPY, "Entropy" },
    { VIRTIO_ID_BALLOON, "Balloon" },
    { VIRTIO_ID_IOMEMORY, "IOMemory" },
    { VIRTIO_ID_SCSI, "SCSI" },
    { VIRTIO_ID_9P, "9P Transport" },

    { 0, NULL }
};
+
/* Device independent features. */
/* Transport-level feature bits shared by all VirtIO device types.
 * Terminated by a { 0, NULL } sentinel; consulted as the fallback table
 * in virtio_feature_name(). */
static struct virtio_feature_desc virtio_common_feature_desc[] = {
    { VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty" },
    { VIRTIO_RING_F_INDIRECT_DESC, "RingIndirect" },
    { VIRTIO_RING_F_EVENT_IDX, "EventIdx" },
    { VIRTIO_F_BAD_FEATURE, "BadFeature" },

    { 0, NULL }
};
+
+const char *
+virtio_dev_name(unsigned short devid)
+{
+ struct virtio_ident *ident;
+
+ for (ident = virtio_ident_table; ident->name != NULL; ident++) {
+ if (ident->devid == devid)
+ return (ident->name);
+ }
+
+ return (NULL);
+}
+
+static const char *
+virtio_feature_name(unsigned long val, struct virtio_feature_desc *desc)
+{
+ int i, j;
+ struct virtio_feature_desc *descs[2] = { desc,
+ virtio_common_feature_desc };
+
+ for (i = 0; i < 2; i++) {
+ if (descs[i] == NULL)
+ continue;
+
+ for (j = 0; descs[i][j].vfd_val != 0; j++) {
+ if (val == descs[i][j].vfd_val)
+ return (descs[i][j].vfd_str);
+ }
+ }
+
+ return (NULL);
+}
+
+void virtio_describe(struct virtio_device *dev, const char *msg,
+ uint32_t features, struct virtio_feature_desc *desc)
+{
+ // TODO: Not used currently - keeping it for future use
+ virtio_feature_name(0,desc);
+}
+
diff --git a/middleware/multicore/open-amp/virtio/virtio.h b/middleware/multicore/open-amp/virtio/virtio.h
new file mode 100644
index 0000000..ab11dcd
--- /dev/null
+++ b/middleware/multicore/open-amp/virtio/virtio.h
@@ -0,0 +1,151 @@
+/*-
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_H_
+#define _VIRTIO_H_
+
+#include "virtqueue.h"
+
/* VirtIO device IDs. */
#define VIRTIO_ID_NETWORK    0x01
#define VIRTIO_ID_BLOCK      0x02
#define VIRTIO_ID_CONSOLE    0x03
#define VIRTIO_ID_ENTROPY    0x04
#define VIRTIO_ID_BALLOON    0x05
#define VIRTIO_ID_IOMEMORY   0x06
#define VIRTIO_ID_RPMSG      0x07 /* virtio remote_proc messaging */
#define VIRTIO_ID_SCSI       0x08
#define VIRTIO_ID_9P         0x09

/* Status byte for guest to report progress. */
#define VIRTIO_CONFIG_STATUS_RESET      0x00
#define VIRTIO_CONFIG_STATUS_ACK        0x01
#define VIRTIO_CONFIG_STATUS_DRIVER     0x02
#define VIRTIO_CONFIG_STATUS_DRIVER_OK  0x04
#define VIRTIO_CONFIG_STATUS_FAILED     0x80

/*
 * Generate interrupt when the virtqueue ring is
 * completely used, even if we've suppressed them.
 */
#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)

/*
 * The guest should never negotiate this feature; it
 * is used to detect faulty drivers.
 */
#define VIRTIO_F_BAD_FEATURE (1 << 30)

/*
 * Some VirtIO feature bits (currently bits 28 through 31) are
 * reserved for the transport being used (eg. virtio_ring), the
 * rest are per-device feature bits.
 */
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END   32

/* Forward alias for the dispatch table defined at the bottom of this file. */
typedef struct _virtio_dispatch_ virtio_dispatch;

/* (feature bit value, printable name) pair used by virtio_describe(). */
struct virtio_feature_desc {
    uint32_t vfd_val;       /* feature bit mask value */
    const char *vfd_str;    /* human-readable feature name */
};
+
/*
 * Structure definition for virtio devices for use by the
 * applications/drivers
 *
 */

struct virtio_device {
    /*
     * Since there is no generic device structure so
     * keep its type as void. The driver layer will take
     * care of it.
     */
    void *device;

    /* Device name */
    char *name;

    /* List of virtqueues encapsulated by virtio device. */
    //TODO : Need to implement a list service for ipc stack.
    void *vq_list;

    /* Virtio device specific features */
    uint32_t features;

    /* Virtio dispatch table (set of device config callbacks below). */
    virtio_dispatch *func;

    /*
     * Pointer to hold some private data, useful
     * in callbacks.
     */
    void *data;
};
+
/*
 * Helper functions.
 */
/* NOTE(review): the definition in virtio.c uses 'unsigned short devid' while
 * this prototype uses uint16_t - identical on supported targets, but the two
 * should be harmonized. */
const char *virtio_dev_name(uint16_t devid);
void virtio_describe(struct virtio_device *dev, const char *msg,
        uint32_t features, struct virtio_feature_desc *feature_desc);

/*
 * Functions for virtio device configuration as defined in Rusty Russell's paper.
 * Drivers are expected to implement these functions in their respective codes.
 *
 */

struct _virtio_dispatch_ {
    /* Allocate/initialize 'nvqs' queues; fills vqs[] on success. */
    int (*create_virtqueues)(struct virtio_device *dev, int flags, int nvqs,
            const char *names[], vq_callback *callbacks[],
            struct virtqueue *vqs[]);
    /* Read/write the VIRTIO_CONFIG_STATUS_* progress byte. */
    uint8_t (*get_status)(struct virtio_device *dev);
    void (*set_status)(struct virtio_device *dev, uint8_t status);
    /* Raw feature bitmaps; negotiate returns the agreed subset. */
    uint32_t (*get_features)(struct virtio_device *dev);
    void (*set_features)(struct virtio_device *dev, uint32_t feature);
    uint32_t (*negotiate_features)(struct virtio_device *dev, uint32_t features);

    /*
     * Read/write a variable amount from the device specific (ie, network)
     * configuration region. This region is encoded in the same endian as
     * the guest.
     */
    void (*read_config)(struct virtio_device *dev, uint32_t offset, void *dst,
            int length);
    void (*write_config)(struct virtio_device *dev, uint32_t offset, void *src,
            int length);
    /* Reset device and return it to the initial (pre-ACK) state. */
    void (*reset_device)(struct virtio_device *dev);

};
+
+#endif /* _VIRTIO_H_ */
diff --git a/middleware/multicore/open-amp/virtio/virtio_ring.h b/middleware/multicore/open-amp/virtio/virtio_ring.h
new file mode 100644
index 0000000..5bea08b
--- /dev/null
+++ b/middleware/multicore/open-amp/virtio/virtio_ring.h
@@ -0,0 +1,165 @@
+/*-
+ * Copyright Rusty Russell IBM Corporation 2007.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef VIRTIO_RING_H
+#define VIRTIO_RING_H
+
/* Flags for vring_desc.flags: */
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT       1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE      2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT   4 /*not used?*/

/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY  1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+
/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next". */
struct vring_desc {
    /* Address (guest-physical). */
    uint64_t addr;
    /* Length. */
    uint32_t len;
    /* The flags as indicated above (VRING_DESC_F_*). */
    uint16_t flags;
    /* We chain unused descriptors via this, too. */
    uint16_t next;
};

/* Ring of descriptor-chain heads offered to the other side.
 * NOTE(review): ring[0] is a GCC zero-length array; C99 code would use a
 * flexible array member (ring[]). */
struct vring_avail {
    uint16_t flags;     /* VRING_AVAIL_F_* */
    uint16_t idx;       /* free-running producer index (mod 2^16) */
    uint16_t ring[0];   /* 'num' entries follow in ring memory */
};

/* uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
    /* Index of start of used descriptor chain. */
    uint32_t id;
    /* Total length of the descriptor chain which was written to. */
    uint32_t len;
};

/* Ring of consumed descriptor chains returned by the other side. */
struct vring_used {
    uint16_t flags;     /* VRING_USED_F_* */
    uint16_t idx;       /* free-running producer index (mod 2^16) */
    struct vring_used_elem ring[0]; /* 'num' entries follow in ring memory */
};

/* Control block describing one ring: the three tables share one
 * contiguous memory region (see layout comment below). */
struct vring {
    unsigned int num;   /* number of descriptors; power of two */

    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
};
+
/* The standard layout for the ring is a continuous chunk of memory which
 * looks like this. We assume num is a power of 2.
 *
 * struct vring {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;
 * };
 *
 * NOTE: for VirtIO PCI, align is 4096.
 */

/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 * Only meaningful when VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define vring_used_event(vr)    ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr)   (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline int
+vring_size(unsigned int num, unsigned long align)
+{
+ int size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)) +
+ sizeof(uint16_t);
+ size = (size + align - 1) & ~(align - 1);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem)) + sizeof(uint16_t);
+ return (size);
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ (((unsigned long) &vr->avail->ring[num] + align-1) & ~(align-1));
+}
+
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 *
 * Given the event index published by the other side, decide whether
 * advancing our ring index from 'old' to 'new_idx' crossed the event
 * point and therefore requires a notification.
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    /* All arithmetic is mod 2^16, which makes the comparison correct
     * across index wrap-around. */
    uint16_t passed = (uint16_t)(new_idx - event_idx - 1);
    uint16_t window = (uint16_t)(new_idx - old);

    return (passed < window);
}
+#endif /* VIRTIO_RING_H */
diff --git a/middleware/multicore/open-amp/virtio/virtqueue.c b/middleware/multicore/open-amp/virtio/virtqueue.c
new file mode 100644
index 0000000..2ea87cd
--- /dev/null
+++ b/middleware/multicore/open-amp/virtio/virtqueue.c
@@ -0,0 +1,693 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "virtqueue.h"
+
+/* Prototype for internal functions. */
+static void vq_ring_init(struct virtqueue *);
+static void vq_ring_update_avail(struct virtqueue *, uint16_t);
+static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
+ uint16_t, struct llist *, int, int);
+static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
+static void vq_ring_free_chain(struct virtqueue *, uint16_t);
+static int vq_ring_must_notify_host(struct virtqueue *vq);
+static void vq_ring_notify_host(struct virtqueue *vq);
+static int virtqueue_nused(struct virtqueue *vq);
+
/**
 * virtqueue_create - Creates new VirtIO queue
 *
 * @param device    - Pointer to VirtIO device
 * @param id        - VirtIO queue ID , must be unique
 * @param name      - Name of VirtIO queue
 * @param ring      - Pointer to vring_alloc_info control block
 * @param callback  - Pointer to callback function, invoked
 *                    when message is available on VirtIO queue
 * @param notify    - Pointer to notify function, used to notify
 *                    other side that there is job available for it
 * @param v_queue   - Created VirtIO queue.
 *
 * @return          - Function status (VQUEUE_SUCCESS or ERROR_*)
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id, char *name,
        struct vring_alloc_info *ring, void (*callback)(struct virtqueue *vq),
        void (*notify)(struct virtqueue *vq),
        struct virtqueue **v_queue) {

    struct virtqueue *vq = VQ_NULL;
    int status = VQUEUE_SUCCESS;
    uint32_t vq_size = 0;

    /* The ring must exist and its size must be a non-zero power of two. */
    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
            ERROR_VRING_ALIGN);

    //TODO : Error check for indirect buffer addition

    if (status == VQUEUE_SUCCESS) {

        /* One vq_desc_extra bookkeeping slot per ring descriptor is
         * allocated in the same chunk as the virtqueue itself. */
        vq_size = sizeof(struct virtqueue)
                + (ring->num_descs) * sizeof(struct vq_desc_extra);
        vq = (struct virtqueue *) env_allocate_memory(vq_size);


        if (vq == VQ_NULL) {
            return (ERROR_NO_MEM);
        }

        env_memset(vq, 0x00, vq_size);


        vq->vq_dev = virt_dev;
        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment = ring->align; /*ring info comes from proc_table*/
        vq->vq_nentries = ring->num_descs;
        vq->vq_free_cnt = vq->vq_nentries;
        vq->callback = callback; /*rpmsg_tx_callback, rpmsg_rx_callback*/
        vq->notify = notify; /*hil_vring_notify*/

        //TODO : Whether we want to support indirect addition or not.
        /* The ring memory itself is pre-allocated by the platform; we
         * only record its address and computed size here. */
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem = (void *) ring->phy_addr;

        /* Initialize vring control block in virtqueue. */
        vq_ring_init(vq);

        /* Disable callbacks - will be enabled by the application
         * once initialization is completed.
         */
        virtqueue_disable_cb(vq);

        *v_queue = vq;

        //TODO : Need to add cleanup in case of error used with the indirect buffer addition
        //TODO: do we need to save the new queue in db based on its id
    }

    return (status);
}
+
/**
 * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
 *                          by other side. Readable buffers are always
 *                          inserted before writable buffers
 *
 * @param vq        - Pointer to VirtIO queue control block.
 * @param buffer    - Pointer to buffer list
 * @param readable  - Number of readable buffers
 * @param writable  - Number of writable buffers
 * @param cookie    - Pointer to hold call back data
 *
 * @return          - Function status (VQUEUE_SUCCESS or ERROR_*)
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct llist *buffer,
        int readable, int writable, void *cookie) {

    struct vq_desc_extra *dxp = VQ_NULL;
    int status = VQUEUE_SUCCESS;
    uint16_t head_idx;
    uint16_t idx;
    int needed;

    needed = readable + writable; /*only one can be set to 1*/

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

    //TODO: Add parameters validation for indirect buffer addition

    VQUEUE_BUSY(vq);

    if (status == VQUEUE_SUCCESS) {

        //TODO : Indirect buffer addition support

        VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

        /* The cookie is stashed against the chain's head descriptor so
         * virtqueue_get_buffer() can hand it back on completion. */
        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, (dxp->cookie == VQ_NULL), "cookie already exists for index");

        dxp->cookie = cookie;
        dxp->ndescs = needed;

        /* Enqueue buffer onto the ring. */
        idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer, /*idx now "desc.next"*/
                readable, writable);

        /* The free list now starts past the chain we just consumed. */
        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;

        if (vq->vq_free_cnt == 0)
            VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
            VQ_RING_ASSERT_VALID_IDX(vq, idx);

        /*
         * Update vring_avail control block fields so that other
         * side can get buffer using it.
         */
        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq);

    return (status);
}
+
/**
 * virtqueue_add_single_buffer - Enqueues single buffer in vring
 *
 * @param vq            - Pointer to VirtIO queue control block
 * @param cookie        - Pointer to hold call back data
 * @param buffer_addr   - Address of buffer
 * @param len           - Length of buffer
 * @param writable      - If buffer writable
 * @param has_next      - If buffers for subsequent call are
 *                        to be chained
 *
 * @return              - Function status (VQUEUE_SUCCESS or ERROR_*)
 */
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie, /*this function is not used at all*/
        void *buffer_addr, uint_t len, int writable, boolean has_next) {

    struct vq_desc_extra *dxp;
    struct vring_desc *dp;
    uint16_t head_idx;
    uint16_t idx;
    int status = VQUEUE_SUCCESS;

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);

    VQUEUE_BUSY(vq);

    if (status == VQUEUE_SUCCESS) {

        VQASSERT(vq, cookie != VQ_NULL, "enqueuing with no cookie");

        head_idx = vq->vq_desc_head_idx;
        dxp = &vq->vq_descx[head_idx];

        /* Single descriptor chain: cookie tracked at its head. */
        dxp->cookie = cookie;
        dxp->ndescs = 1;
        idx = head_idx;

        /* Fill the descriptor; 'next' is read before flags are set so
         * the free-list link is captured first. */
        dp = &vq->vq_ring.desc[idx];
        dp->addr = env_map_vatopa(buffer_addr);
        dp->len = len;
        dp->flags = 0;
        idx = dp->next;

        if (has_next)
            dp->flags |= VRING_DESC_F_NEXT;
        if (writable)
            dp->flags |= VRING_DESC_F_WRITE;

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt--;

        if (vq->vq_free_cnt == 0)
            VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
            VQ_RING_ASSERT_VALID_IDX(vq, idx);

        /* Publish the chain head on the avail ring. */
        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq);

    return (status);
}
+
/**
 * virtqueue_get_buffer - Returns used buffers from VirtIO queue
 *
 * @param vq    - Pointer to VirtIO queue control block
 * @param len   - Length of consumed buffer (output, may be VQ_NULL)
 *
 * @return      - Cookie registered when the buffer was enqueued, or
 *                VQ_NULL when no used buffer is pending
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len) {
    struct vring_used_elem *uep;
    void *cookie;
    uint16_t used_idx, desc_idx;

    /* Nothing consumed by the other side since we last looked. */
    if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
        return (VQ_NULL);

    VQUEUE_BUSY(vq);

    used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
    uep = &vq->vq_ring.used->ring[used_idx];

    /* Read barrier: the used-element contents must be read only after
     * the index check above observed the producer's update. */
    env_rmb();

    desc_idx = (uint16_t) uep->id;
    if (len != VQ_NULL)
        *len = uep->len;

    /* Return the descriptor chain to the free list. */
    vq_ring_free_chain(vq, desc_idx);

    cookie = vq->vq_descx[desc_idx].cookie;
    vq->vq_descx[desc_idx].cookie = VQ_NULL;

    VQUEUE_IDLE(vq);

    return (cookie);
}
+
+/**
+ * virtqueue_free - Frees VirtIO queue resources
+ *
+ * @param vq - Pointer to VirtIO queue control block
+ *
+ */
+void virtqueue_free(struct virtqueue *vq) {
+
+ if (vq != VQ_NULL) {
+
+ if (vq->vq_free_cnt != vq->vq_nentries) {
+ env_print("\r\nWARNING %s: freeing non-empty virtqueue\r\n", vq->vq_name);
+ }
+
+ //TODO : Need to free indirect buffers here
+
+ if (vq->vq_ring_mem != VQ_NULL) {
+ vq->vq_ring_size = 0;
+ vq->vq_ring_mem = VQ_NULL;
+ }
+
+ env_free_memory(vq);
+ }
+}
+
/**
 * virtqueue_get_available_buffer - Returns buffer available for use in the
 *                                  VirtIO queue
 *
 * @param vq        - Pointer to VirtIO queue control block
 * @param avail_idx - Pointer to index used in vring desc table (output)
 * @param len       - Length of buffer (output)
 *
 * @return          - Pointer to available buffer, or VQ_NULL when the
 *                    avail ring holds no new entries
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
        uint32_t *len) {

    uint16_t head_idx = 0;
    void *buffer;

    if (vq->vq_available_idx == vq->vq_ring.avail->idx) { /*vq->vq_ring.avail->idx is updated in "rpmsg_rdev_create_virtqueues" "virtqueue_add_buffer" */
        return (VQ_NULL);
    }

    VQUEUE_BUSY(vq);

    /* Consume the next avail-ring slot (ring size is a power of two). */
    head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
    *avail_idx = vq->vq_ring.avail->ring[head_idx];

    /* Read barrier before dereferencing the descriptor the other side
     * published. */
    env_rmb();

    buffer = env_map_patova(vq->vq_ring.desc[*avail_idx].addr);
    *len = vq->vq_ring.desc[*avail_idx].len;

    VQUEUE_IDLE(vq);

    return (buffer);
}
+
+/**
+ * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
+ *
+ * @param vq - Pointer to VirtIO queue control block
+ * @param head_idx - Index of vring desc containing used buffer
+ * @param len - Length of buffer
+ *
+ * @return - Function status
+ */
+int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
+ uint_t len) {
+
+ struct vring_used_elem *used_desc = VQ_NULL;
+ uint16_t used_idx;
+
+ if ((head_idx > vq->vq_nentries) || (head_idx < 0)) {
+ return (ERROR_VRING_NO_BUFF);
+ }
+
+ VQUEUE_BUSY(vq);
+
+ used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
+ used_desc = &(vq->vq_ring.used->ring[used_idx]);
+ used_desc->id = head_idx;
+ used_desc->len = len;
+
+ env_wmb();
+
+ vq->vq_ring.used->idx++;
+
+ VQUEUE_IDLE(vq);
+
+ return (VQUEUE_SUCCESS);
+}
+
/**
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - non-zero when used entries are already pending and the
 *           caller should process them, 0 otherwise
 */
int virtqueue_enable_cb(struct virtqueue *vq) {
    /* Re-arm interrupts with no extra threshold. */
    return vq_ring_enable_interrupt(vq, 0);
}
+
/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq) {

    VQUEUE_BUSY(vq);

    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        /* With EVENT_IDX, push used_event far behind the consumer index
         * so the other side never crosses it (suppresses notifications). */
        vring_used_event(&vq->vq_ring)= vq->vq_used_cons_idx - vq->vq_nentries
                - 1;
    } else {
        /* Legacy path: advisory flag asking the host not to interrupt. */
        vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
    }

    VQUEUE_IDLE(vq);
}
+
/**
 * virtqueue_kick - Notifies other side that there is buffer available for it.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq) {

    VQUEUE_BUSY(vq);

    /* Ensure updated avail->idx is visible to host. */
    env_mb();

    /* Honor the other side's notification-suppression hints. */
    if (vq_ring_must_notify_host(vq))
        vq_ring_notify_host(vq);

    /* All queued chains have now been announced. */
    vq->vq_queued_cnt = 0;

    VQUEUE_IDLE(vq);
}
+
/**
 * virtqueue_dump - Dumps important virtqueue fields , use for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block (VQ_NULL is a no-op)
 */
void virtqueue_dump(struct virtqueue *vq) {

    if (vq == VQ_NULL)
        return;

    env_print("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n", vq->vq_name,
            vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
            vq->vq_queued_cnt, vq->vq_desc_head_idx, vq->vq_ring.avail->idx,
            vq->vq_used_cons_idx, vq->vq_ring.used->idx,
            vq->vq_ring.avail->flags, vq->vq_ring.used->flags);
}
+
+/**
+ * virtqueue_get_desc_size - Returns vring descriptor size
+ *
+ * @param vq - Pointer to VirtIO queue control block
+ *
+ * @return - Descriptor length
+ */
+uint32_t virtqueue_get_desc_size(struct virtqueue *vq) {
+ uint16_t head_idx = 0;
+ uint16_t avail_idx = 0;
+ uint32_t len = 0;
+
+ if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
+ return (VQ_NULL);
+ }
+
+ VQUEUE_BUSY(vq);
+
+ head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);
+ avail_idx = vq->vq_ring.avail->ring[head_idx];
+ len = vq->vq_ring.desc[avail_idx].len;
+
+ VQUEUE_IDLE(vq);
+
+ return (len);
+}
/**************************************************************************
 *                            Helper Functions                            *
 **************************************************************************/

/**
 *
 * vq_ring_add_buffer - writes the buffers of 'buffer' into consecutive
 * free descriptors starting at head_idx, chaining them with
 * VRING_DESC_F_NEXT.  Readable buffers are placed before writable ones.
 * Returns the index of the first descriptor past the new chain (the new
 * free-list head).
 *
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
        struct vring_desc *desc, uint16_t head_idx, struct llist *buffer,
        int readable, int writable) {

    struct vring_desc *dp;
    int i, needed;
    uint16_t idx;

    needed = readable + writable;

    /* Walk the free-descriptor chain and the buffer list in lockstep. */
    for (i = 0, idx = head_idx; (i < needed && buffer != VQ_NULL);
            i++, idx = dp->next, buffer = buffer->next) {

        VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                "premature end of free desc chain");

        dp = &desc[idx];
        dp->addr = env_map_vatopa(buffer->data);
        dp->len = buffer->attr;     /* buffer length carried in llist attr */
        dp->flags = 0;

        /* Every descriptor but the last continues the chain. */
        if (i < needed - 1)
            dp->flags |= VRING_DESC_F_NEXT;

        /* Readable buffers are inserted into vring before the writable buffers.*/
        if (i >= readable)
            dp->flags |= VRING_DESC_F_WRITE;
    }

    return (idx);
}
+
/**
 *
 * vq_ring_free_chain - returns the descriptor chain starting at desc_idx
 * to the free list, walking the VRING_DESC_F_NEXT links and re-linking
 * the chain in front of the current free-list head.
 *
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) {
    struct vring_desc *dp;
    struct vq_desc_extra *dxp;

    VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
    dp = &vq->vq_ring.desc[desc_idx];
    dxp = &vq->vq_descx[desc_idx];

    if (vq->vq_free_cnt == 0)
        VQ_RING_ASSERT_CHAIN_TERM(vq);

    /* Credit the whole chain up front; ndescs then counts down as the
     * chain links are walked below, and must reach exactly 0. */
    vq->vq_free_cnt += dxp->ndescs;
    dxp->ndescs--;

    if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
        while (dp->flags & VRING_DESC_F_NEXT) {
            VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
            dp = &vq->vq_ring.desc[dp->next];
            dxp->ndescs--;
        }
    }

    VQASSERT(vq, (dxp->ndescs == 0),
            "failed to free entire desc chain, remaining");

    /*
     * We must append the existing free chain, if any, to the end of
     * newly freed chain. If the virtqueue was completely used, then
     * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
     */
    dp->next = vq->vq_desc_head_idx;
    vq->vq_desc_head_idx = desc_idx;
}
+
/**
 *
 * vq_ring_init - maps the vring tables onto the pre-allocated ring
 * memory and links all descriptors into one free chain terminated by
 * VQ_RING_DESC_CHAIN_END.
 *
 */
static void vq_ring_init(struct virtqueue *vq) {
    struct vring *vr;
    unsigned char *ring_mem;
    int i, size;

    ring_mem = vq->vq_ring_mem;
    size = vq->vq_nentries;
    vr = &vq->vq_ring;

    vring_init(vr, size, ring_mem, vq->vq_alignment);

    /* Chain descriptor i -> i+1; 'i' deliberately survives the loop so
     * the last descriptor terminates the free list. */
    for (i = 0; i < size - 1; i++)
        vr->desc[i].next = i + 1;
    vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
+
/**
 *
 * vq_ring_update_avail - publishes the chain head desc_idx on the avail
 * ring and bumps avail->idx so the other side can pick it up.
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx) {
    uint16_t avail_idx;

    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
    vq->vq_ring.avail->ring[avail_idx] = desc_idx;

    /* Write barrier: the slot must be visible before the index bump. */
    env_wmb();

    vq->vq_ring.avail->idx++;

    /* Keep pending count until virtqueue_notify(). */
    vq->vq_queued_cnt++;
}
+
/**
 *
 * vq_ring_enable_interrupt - re-arms notifications from the other side;
 * with EVENT_IDX the trigger point is set ndesc entries ahead of the
 * current consumer index.  Returns 1 if work is already pending.
 *
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc) {

    /*
     * Enable interrupts, making sure we get the latest index of
     * what's already been consumed.
     */
    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        vring_used_event(&vq->vq_ring)= vq->vq_used_cons_idx + ndesc;
    } else {
        vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    }

    /* Full barrier before re-checking the used index below. */
    env_mb();

    /*
     * Enough items may have already been consumed to meet our threshold
     * since we last checked. Let our caller know so it processes the new
     * entries.
     */
    if (virtqueue_nused(vq) > ndesc) {
        return (1);
    }

    return (0);
}
+
+/**
+ *
+ * virtqueue_interrupt
+ *
+ */
+void virtqueue_notification(struct virtqueue *vq) {
+
+ if (vq->callback != VQ_NULL)
+ vq->callback(vq);
+}
+
/**
 *
 * vq_ring_must_notify_host - decides whether the host must be kicked,
 * honoring either the EVENT_IDX trigger point or the legacy
 * VRING_USED_F_NO_NOTIFY suppression flag.
 *
 */
static int vq_ring_must_notify_host(struct virtqueue *vq) {
    uint16_t new_idx, prev_idx, event_idx;

    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        /* Notify only if avail->idx crossed the host's published
         * avail_event point since our last kick. */
        new_idx = vq->vq_ring.avail->idx;
        prev_idx = new_idx - vq->vq_queued_cnt;
        event_idx = vring_avail_event(&vq->vq_ring);

        return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
    }

    return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
+
+/**
+ *
+ * vq_ring_notify_host
+ *
+ */
+static void vq_ring_notify_host(struct virtqueue *vq) {
+
+ if (vq->notify != VQ_NULL)
+ vq->notify(vq); /*hil_vring_notify*/
+}
+
+/**
+ *
+ * virtqueue_nused
+ *
+ */
+static int virtqueue_nused(struct virtqueue *vq) {
+ uint16_t used_idx, nused;
+
+ used_idx = vq->vq_ring.used->idx;
+
+ nused = (uint16_t) (used_idx - vq->vq_used_cons_idx);
+ VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
+
+ return (nused);
+}
diff --git a/middleware/multicore/open-amp/virtio/virtqueue.h b/middleware/multicore/open-amp/virtio/virtqueue.h
new file mode 100644
index 0000000..4671f6e
--- /dev/null
+++ b/middleware/multicore/open-amp/virtio/virtqueue.h
@@ -0,0 +1,227 @@
+#ifndef VIRTQUEUE_H_
+#define VIRTQUEUE_H_
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#include <stdint.h>
+
+typedef unsigned int uint_t;
+typedef uint8_t boolean;
+
+#include "virtio_ring.h"
+#include "../porting/env/env.h"
+#include "../common/llist/llist.h"
+
+/* Error Codes — negative values in a private range so they cannot
+ * collide with valid descriptor indices or VQUEUE_SUCCESS (0). */
+#define VQ_ERROR_BASE -3000
+#define ERROR_VRING_FULL (VQ_ERROR_BASE - 1)
+#define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2)
+#define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3)
+#define ERROR_NO_MEM (VQ_ERROR_BASE - 4)
+#define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5)
+#define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6)
+#define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7)
+#define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8)
+
+/* NOTE(review): redefining 'true'/'false' as object-like macros clashes
+ * with <stdbool.h> (and the C23 keywords) — confirm no translation unit
+ * includes both, or guard these with #ifndef. */
+#define true 1
+#define false 0
+#define VQUEUE_SUCCESS 0
+#define VQUEUE_DEBUG false
+
+//TODO:
+/* This is temporary macro to replace C NULL support.
+ * At the moment all the RTL specific functions are present in env.
+ * */
+#define VQ_NULL 0
+
+/* The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+/* Internal vq_flags bits for struct virtqueue. */
+#define VIRTQUEUE_FLAG_INDIRECT 0x0001
+#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+/* Support for indirect buffer descriptors. */
+#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
+
+/* Support to suppress interrupt until specific index is reached. */
+#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
+
+/*
+ * Hint on how long the next interrupt should be postponed. This is
+ * only used when the EVENT_IDX feature is negotiated.
+ */
+typedef enum {
+ VQ_POSTPONE_SHORT, /* interrupt again soon */
+ VQ_POSTPONE_LONG, /* defer the interrupt further out */
+ VQ_POSTPONE_EMPTIED /* Until all available desc are used. */
+} vq_postpone_t;
+
+struct virtqueue {
+ //TODO: Need to define proper structure for
+ // virtio device with RPmsg and paravirtualization.
+
+ struct virtio_device *vq_dev; /* owning virtio device */
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ]; /* queue name (used in asserts) */
+ uint16_t vq_queue_index;
+ uint16_t vq_nentries; /* ring size in descriptors */
+ uint32_t vq_flags; /* VIRTQUEUE_FLAG_* bits */
+ int vq_alignment;
+ int vq_ring_size; /*Seems not used at all*/
+ boolean vq_inuse; /* re-entrancy guard — see VQUEUE_BUSY/VQUEUE_IDLE */
+ void *vq_ring_mem;
+ void (*callback)(struct virtqueue *vq); /* run by virtqueue_notification() */
+ void (*notify)(struct virtqueue *vq); /* kicks the remote side */
+ int vq_max_indirect_size;
+ int vq_indirect_mem_size;
+ struct vring vq_ring; /* desc/avail/used ring views */
+ uint16_t vq_free_cnt; /* free descriptors remaining */
+ uint16_t vq_queued_cnt; /* avail entries added since the last kick */
+
+ /*
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+
+ /*
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+
+ /*
+ * Last consumed descriptor in the available table -
+ * used by the consumer side.
+ */
+ uint16_t vq_available_idx;
+
+ uint8_t padd; /* explicit padding byte */
+
+ /*
+ * Used by the host side during callback. Cookie
+ * holds the address of buffer received from other side.
+ * Other fields in this structure are not used currently.
+ */
+
+ struct vq_desc_extra {
+ void *cookie;
+ struct vring_desc *indirect;
+ uint32_t indirect_paddr;
+ uint16_t ndescs;
+ } vq_descx[0]; /* NOTE(review): zero-length trailing array is a GNU
+ * extension; the C99 flexible array member '[]' is
+ * the portable spelling — confirm toolchains in use. */
+};
+
+/* struct to hold vring specific information, passed to virtqueue_create() */
+struct vring_alloc_info {
+ void *phy_addr; /* ring memory address (name suggests physical — confirm) */
+ uint32_t align; /* ring alignment */
+ uint16_t num_descs; /* number of descriptors (ring size) */
+ uint16_t pad; /* explicit padding for alignment */
+};
+
+/* Signature types for the per-queue callback and notify hooks. */
+typedef void vq_callback(struct virtqueue *);
+typedef void vq_notify(struct virtqueue *);
+
+#if (VQUEUE_DEBUG == true)
+
+/* Debug build: a failed assertion prints a diagnostic and spins forever,
+ * so the failure stays visible on an embedded target with no exit path. */
+#define VQASSERT(_vq, _exp, _msg) do{ \
+ if (!(_exp)){ env_print("%s: %s - "_msg, __func__, (_vq)->vq_name); while(1);} \
+ } while(0)
+
+#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
+ VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
+ "invalid ring index")
+
+#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
+ VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
+ VQ_RING_DESC_CHAIN_END, "full ring terminated incorrectly: invalid head")
+
+/* Record the first error only (status_var keeps its first non-zero value).
+ * Wrapped in do/while(0) so the bare 'if' cannot capture an 'else' that
+ * follows the macro at its expansion site. */
+#define VQ_PARAM_CHK(condition, status_var, status_err) \
+ do { \
+ if ((status_var == 0) && (condition)) \
+ { \
+ status_var = status_err; \
+ } \
+ } while (0)
+
+/* Mark the queue busy; asserts if it is already in use. The do/while(0)
+ * wrapper makes the embedded if/else safe inside any caller's if/else. */
+#define VQUEUE_BUSY(vq) do { \
+ if ((vq)->vq_inuse == false) \
+ (vq)->vq_inuse = true; \
+ else \
+ VQASSERT(vq, (vq)->vq_inuse == false, \
+ "VirtQueue already in use"); \
+ } while (0)
+
+#define VQUEUE_IDLE(vq) ((vq)->vq_inuse = false)
+
+#else
+
+/* Release build: all checks compile away to nothing.
+ * NOTE(review): KASSERT is only defined in this branch — confirm nothing
+ * uses it when VQUEUE_DEBUG is true, or define it above as well. */
+#define KASSERT(cond, str)
+#define VQASSERT(_vq, _exp, _msg)
+#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
+#define VQ_RING_ASSERT_CHAIN_TERM(_vq)
+#define VQ_PARAM_CHK(condition, status_var, status_err)
+#define VQUEUE_BUSY(vq)
+#define VQUEUE_IDLE(vq)
+
+#endif
+
+/* Create and initialize virtqueue 'id' for 'device' over the shared ring
+ * described by 'ring'; 'callback' runs on incoming notifications, 'notify'
+ * kicks the peer. The new queue is returned through 'v_queue'. */
+int virtqueue_create(struct virtio_device *device, unsigned short id, char *name,
+ struct vring_alloc_info *ring, void (*callback)(struct virtqueue *vq),
+ void (*notify)(struct virtqueue *vq), struct virtqueue **v_queue);
+
+/* Post a chain of buffers (llist of 'readable' then 'writable' segments);
+ * 'cookie' identifies the chain when it is later returned used. */
+int virtqueue_add_buffer(struct virtqueue *vq, struct llist *buffer,
+ int readable, int writable, void *cookie);
+
+/* Post a single buffer; 'has_next' presumably chains it to the next
+ * add call — TODO confirm against the definition. */
+int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
+ void* buffer_addr, uint_t len, int writable, boolean has_next);
+
+/* Retrieve the cookie of the next used (completed) buffer; the peer's
+ * reported length is returned through 'len'. */
+void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len);
+
+/* Consumer side: fetch the next available buffer; its index is returned
+ * through 'avail_idx' and its length through 'len'. */
+void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
+ uint32_t *len);
+
+/* Consumer side: return buffer 'head_idx' to the used ring with 'len'
+ * bytes written. */
+int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
+ uint_t len);
+
+/* Suppress used-buffer interrupts for this queue. */
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+/* Re-enable used-buffer interrupts; see vq_ring_enable_interrupt for the
+ * "missed work" return convention. */
+int virtqueue_enable_cb(struct virtqueue *vq);
+
+/* Notify the peer that new avail entries were queued (if needed). */
+void virtqueue_kick(struct virtqueue *vq);
+
+/* Release the virtqueue and its resources. */
+void virtqueue_free(struct virtqueue *vq);
+
+/* Debug helper: dump queue state. */
+void virtqueue_dump(struct virtqueue *vq);
+
+/* Interrupt/kick entry point: invokes the registered callback, if any. */
+void virtqueue_notification(struct virtqueue *vq);
+
+/* Size of the descriptor area — TODO confirm units against definition. */
+uint32_t virtqueue_get_desc_size(struct virtqueue *vq);
+
+#endif /* VIRTQUEUE_H_ */