@@ -82,6 +82,9 @@ struct vring_virtqueue
 	/* Host supports indirect buffers */
 	bool indirect;
 
+	/* Host publishes avail event idx */
+	bool event;
+
 	/* Number of free buffers */
 	unsigned int num_free;
 	/* Head of free buffer list. */
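For context: the new `event` flag records whether VIRTIO_RING_F_EVENT_IDX was negotiated. When it is, each side publishes the event index it wants the other side to honour in a spare u16 at the tail of the ring it already writes: the host's avail event sits just after the used ring, the guest's used event sits just after the avail ring. The accessors used later in this patch are roughly what the companion virtio_ring.h change provides (shown here only for reference, not part of this hunk):

	/* The Host publishes the avail event idx just after the used ring. */
	#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
	/* The Guest publishes the used event idx at the end of the avail ring. */
	#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])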
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
 void virtqueue_kick(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 new, old;
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
 	virtio_wmb();
 
-	vq->vring.avail->idx += vq->num_added;
+	old = vq->vring.avail->idx;
+	new = vq->vring.avail->idx = old + vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
 	virtio_mb();
 
-	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
+	if (vq->event ?
+	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
+	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
 		/* Prod other side to tell it about changes. */
 		vq->notify(&vq->vq);
 
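With vq->event set, the kick is suppressed unless the avail event index the host published falls among the entries added since the last kick. The helper relied on above is expected to look roughly like this (from the companion ring-format change; reproduced only to make the check readable):

	static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
	{
		/* Notify only if event_idx lies among the entries just
		 * published, i.e. old <= event_idx < new_idx; the unsigned
		 * subtractions keep the test correct across u16 wrap. */
		return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
	}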
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	ret = vq->data[i];
 	detach_buf(vq, i);
 	vq->last_used_idx++;
+	/* If we expect an interrupt for the next entry, tell host
+	 * by writing event index and flush out the write before
+	 * the read in the next get_buf call. */
+	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+		vring_used_event(&vq->vring) = vq->last_used_idx;
+		virtio_mb();
+	}
+
 	END_USE(vq);
 	return ret;
 }
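The used event written above is what the device consults before raising an interrupt. An illustrative device-side counterpart, with hypothetical names and not part of this patch, would apply the same window test to the used ring:

	/* Sketch: interrupt only if the guest's used event index was crossed
	 * by the used entries added since the last interrupt. */
	static bool device_should_interrupt(struct vring *vr, u16 new_used, u16 old_used)
	{
		return vring_need_event(vring_used_event(vr), new_used, old_used);
	}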
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vring_used_event(&vq->vring) = vq->last_used_idx;
 	virtio_mb();
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
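The return-false path still matters with event indexes: a used buffer can land between writing the event index above and the more_used() check. A typical caller pattern, shown as a sketch only and not part of this patch, is:

	/* Re-enable callbacks, then re-check for completions that raced in
	 * before the flag and event index writes became visible. */
	if (!virtqueue_enable_cb(vq)) {
		virtqueue_disable_cb(vq);
		/* ... keep draining buffers via virtqueue_get_buf() ... */
	}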
@@ -438,6 +457,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 #endif
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
 	/* No callback? Tell other side not to bother us. */
 	if (!callback)
@@ -472,6 +492,8 @@ void vring_transport_features(struct virtio_device *vdev)
 	switch (i) {
 	case VIRTIO_RING_F_INDIRECT_DESC:
 		break;
+	case VIRTIO_RING_F_EVENT_IDX:
+		break;
 	default:
 		/* We don't understand this bit. */
 		clear_bit(i, vdev->features);