Diffstat (limited to 'virtio-ring.h')
-rw-r--r--  virtio-ring.h  100
1 file changed, 29 insertions, 71 deletions
diff --git a/virtio-ring.h b/virtio-ring.h
index 6d1e2dd..aa01d92 100644
--- a/virtio-ring.h
+++ b/virtio-ring.h
@@ -1,5 +1,5 @@
-#ifndef VIRTIO_RING_H
-#define VIRTIO_RING_H
+#ifndef VIRTQUEUE_H
+#define VIRTQUEUE_H
/* An interface for efficient virtio implementation.
*
* This header is BSD licensed so anyone can use the definitions
@@ -35,33 +35,33 @@
#include <stdint.h>
/* This marks a buffer as continuing via the next field. */
-#define VRING_DESC_F_NEXT 1
+#define VIRTQ_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
-#define VRING_DESC_F_WRITE 2
+#define VIRTQ_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
-#define VRING_DESC_F_INDIRECT 4
+#define VIRTQ_DESC_F_INDIRECT 4
/* The device uses this in used->flags to advise the driver: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
* optimization. */
-#define VRING_USED_F_NO_NOTIFY 1
+#define VIRTQ_USED_F_NO_NOTIFY 1
/* The driver uses this in avail->flags to advise the device: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
* simply an optimization. */
-#define VRING_AVAIL_F_NO_INTERRUPT 1
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
/* Support for indirect descriptors */
-#define VIRTIO_RING_F_INDIRECT_DESC 28
+#define VIRTIO_F_INDIRECT_DESC 28
/* Support for avail_idx and used_idx fields */
-#define VIRTIO_RING_F_EVENT_IDX 29
+#define VIRTIO_F_EVENT_IDX 29
/* Arbitrary descriptor layouts. */
-#define VIRTIO_F_ANY_LAYOUT 27
+#define VIRTIO_F_ANY_LAYOUT 27
-/* Virtio ring descriptors: 16 bytes.
+/* Virtqueue descriptors: 16 bytes.
* These can chain together via "next". */
-struct vring_desc {
+struct virtq_desc {
/* Address (guest-physical). */
le64 addr;
/* Length. */
@@ -72,93 +72,51 @@ struct vring_desc {
le16 next;
};
-struct vring_avail {
+struct virtq_avail {
le16 flags;
le16 idx;
le16 ring[];
- /* Only if VIRTIO_RING_F_EVENT_IDX: le16 used_event; */
+ /* Only if VIRTIO_F_EVENT_IDX: le16 used_event; */
};
/* le32 is used here for ids for padding reasons. */
-struct vring_used_elem {
+struct virtq_used_elem {
/* Index of start of used descriptor chain. */
le32 id;
/* Total length of the descriptor chain which was written to. */
le32 len;
};
-struct vring_used {
+struct virtq_used {
le16 flags;
le16 idx;
- struct vring_used_elem ring[];
- /* Only if VIRTIO_RING_F_EVENT_IDX: le16 avail_event; */
+ struct virtq_used_elem ring[];
+ /* Only if VIRTIO_F_EVENT_IDX: le16 avail_event; */
};
-struct vring {
+struct virtq {
unsigned int num;
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
+ struct virtq_desc *desc;
+ struct virtq_avail *avail;
+ struct virtq_used *used;
};
-/* The standard layout for the ring is a continuous chunk of memory which
- * looks like this. We assume num is a power of 2.
- *
- * struct vring {
- * // The actual descriptors (16 bytes each)
- * struct vring_desc desc[num];
- *
- * // A ring of available descriptor heads with free-running index.
- * le16 avail_flags;
- * le16 avail_idx;
- * le16 available[num];
- * le16 used_event_idx; // Only if VIRTIO_RING_F_EVENT_IDX
- *
- * // Padding to the next align boundary.
- * char pad[];
- *
- * // A ring of used descriptor heads with free-running index.
- * le16 used_flags;
- * le16 used_idx;
- * struct vring_used_elem used[num];
- * le16 avail_event_idx; // Only if VIRTIO_RING_F_EVENT_IDX
- * };
- * Note: for virtio PCI, align is 4096.
- */
-static inline void vring_init(struct vring *vr, unsigned int num, void *p,
- unsigned long align)
-{
- vr->num = num;
- vr->desc = p;
- vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(le16)
- + align-1)
- & ~(align - 1));
-}
-
-static inline unsigned vring_size(unsigned int num, unsigned long align)
-{
- return ((sizeof(struct vring_desc)*num + sizeof(le16)*(3+num)
- + align - 1) & ~(align - 1))
- + sizeof(le16)*3 + sizeof(struct vring_used_elem)*num;
-}
-
-static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
+static inline int virtq_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}
-/* Get location of event indices (only with VIRTIO_RING_F_EVENT_IDX) */
-static inline le16 *vring_used_event(struct vring *vr)
+/* Get location of event indices (only with VIRTIO_F_EVENT_IDX) */
+static inline le16 *virtq_used_event(struct virtq *vq)
{
/* For backwards compat, used event index is at *end* of avail ring. */
- return &vr->avail->ring[vr->num];
+ return &vq->avail->ring[vq->num];
}
-static inline le16 *vring_avail_event(struct vring *vr)
+static inline le16 *virtq_avail_event(struct virtq *vq)
{
/* For backwards compat, avail event index is at *end* of used ring. */
- return (le16 *)&vr->used->ring[vr->num];
+ return (le16 *)&vq->used->ring[vq->num];
}
-#endif /* VIRTIO_RING_H */
+#endif /* VIRTQUEUE_H */
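
For context, a minimal sketch of how a driver might use virtq_need_event() together with virtq_avail_event() from the renamed header to decide whether to notify the device after publishing buffers. This assumes VIRTIO_F_EVENT_IDX has been negotiated and a little-endian host, so the le16 fields can be read directly; notify_device() is a hypothetical placeholder for the transport-specific kick, and the required memory barrier is only noted in a comment. It is an illustration, not part of the header.

/* Sketch only: assumes the virtq definitions above and VIRTIO_F_EVENT_IDX. */
static void example_publish_and_kick(struct virtq *vq, uint16_t nbuffers)
{
        uint16_t old_idx = vq->avail->idx;
        uint16_t new_idx = (uint16_t)(old_idx + nbuffers);

        /* The descriptors and avail->ring[] entries must be visible to the
         * device before the index update (write barrier omitted here). */
        vq->avail->idx = new_idx;

        /* The device publishes, at the end of the used ring, the avail
         * index at which it next wants a notification. Kick only when
         * new_idx has moved past that point since old_idx. */
        if (virtq_need_event(*virtq_avail_event(vq), new_idx, old_idx))
                notify_device(vq);      /* hypothetical transport kick */
}

The device side is symmetric: after advancing used->idx it would test virtq_need_event(*virtq_used_event(vq), new_used_idx, old_used_idx) before raising an interrupt.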