@@ -52,6 +52,12 @@
 /* We support indirect buffer descriptors */
 #define VIRTIO_RING_F_INDIRECT_DESC	28
 
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX	29
+
 /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
 struct vring_desc {
 	/* Address (guest-physical). */
@@ -106,6 +112,7 @@ struct vring {
  *	__u16 avail_flags;
  *	__u16 avail_idx;
  *	__u16 available[num];
+ *	__u16 used_event_idx;
  *
  *	// Padding to the next align boundary.
  *	char pad[];
@@ -114,8 +121,14 @@ struct vring {
  *	__u16 used_flags;
  *	__u16 used_idx;
  *	struct vring_used_elem used[num];
+ *	__u16 avail_event_idx;
  * };
  */
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
+
 static inline void vring_init(struct vring *vr, unsigned int num, void *p,
 			      unsigned long align)
 {
@@ -130,7 +143,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
 {
 	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
 		 + align - 1) & ~(align - 1))
-		+ sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num;
+		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
 }
 
 #ifdef __KERNEL__
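The comments above say where each side publishes its event index, but not how a producer decides whether the other side's index was just crossed. That wraparound-safe test lives in the companion helper vring_need_event(), which this interface is designed around. Below is a minimal standalone sketch of that check; the main() harness and the example index values are illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe event check, as in the kernel's vring_need_event():
 * having advanced an index from 'old' to 'new_idx', notify the other
 * side iff its requested event_idx lies in the half-open range
 * (old, new_idx]. All arithmetic is mod 2^16, so free-running
 * indices that wrap around are handled correctly. */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
				   uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Host moved used_idx from 5 to 8; the guest set used_event to 6
	 * (via vring_used_event()), so 6 is in (5, 8]: interrupt. */
	printf("%d\n", vring_need_event(6, 8, 5));   /* prints 1 */

	/* used_event = 10 has not been passed yet: suppress. */
	printf("%d\n", vring_need_event(10, 8, 5));  /* prints 0 */

	/* Indices wrap mod 2^16: 0xfffe is in (0xfffc, 0x0002]. */
	printf("%d\n", vring_need_event(0xfffe, 2, 0xfffc)); /* prints 1 */
	return 0;
}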
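The vring_size() change is worth a worked example. The extra sizeof(__u16) in the used-side term reserves the two bytes for avail_event_idx after used[]; used_event_idx on the avail side appears to land in the pad[] region left by the align round-up, which would explain why only the used-side term grows in this hunk. A self-contained sketch follows; the DESC_SIZE/USED_ELEM_SIZE stand-ins and the num/align values are illustrative assumptions (vring_desc is 16 bytes per the header comment, vring_used_elem is two 32-bit fields).

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the real structure sizes. */
#define DESC_SIZE	16u
#define USED_ELEM_SIZE	8u

/* vring_size() as patched above, with the structs replaced by their sizes. */
static unsigned vring_size(unsigned num, unsigned long align)
{
	return ((DESC_SIZE * num + sizeof(uint16_t) * (2 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(uint16_t) * 3 + USED_ELEM_SIZE * num;
}

int main(void)
{
	/* num = 256, align = 4096 (one page): descriptor table = 4096,
	 * avail ring = 2 * (2 + 256) = 516, together rounded up to 8192;
	 * used ring = 2 * 3 + 8 * 256 = 2054; total = 10246 bytes.
	 * The pre-patch "* 2" gave 10244, reserving no room for the
	 * trailing avail_event_idx. */
	printf("%u\n", vring_size(256, 4096));
	return 0;
}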