@@ -536,20 +536,21 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
- * function returns the next descriptor in the chain, or vq->vring.num if we're
- * at the end. */
-static unsigned next_desc(struct virtqueue *vq, unsigned int i)
+ * function returns the next descriptor in the chain, or max if we're at
+ * the end. */
+static unsigned next_desc(struct vring_desc *desc,
+			  unsigned int i, unsigned int max)
 {
 	unsigned int next;
 
 	/* If this descriptor says it doesn't chain, we're done. */
-	if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT))
-		return vq->vring.num;
+	if (!(desc[i].flags & VRING_DESC_F_NEXT))
+		return max;
 
 	/* Check they're not leading us off end of descriptors. */
-	next = vq->vring.desc[i].next;
+	next = desc[i].next;
 	/* Make sure compiler knows to grab that: we don't want it changing! */
 	wmb();
 
-	if (next >= vq->vring.num)
+	if (next >= max)
 		errx(1, "Desc next is %u", next);
 
 	return next;
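
The point of the new signature is that next_desc() no longer cares whose
table it is walking: the caller hands in a bare descriptor array plus that
array's length, so the same loop serves both the ring's own table (where
max is vq->vring.num, as before) and an indirect table found through
VRING_DESC_F_INDIRECT. A minimal, self-contained sketch of the walk (not
part of the patch; the struct layout and flag value mirror the virtio ring
ABI, the sample chain is invented):

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1	/* chain continues via the next field */

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/* The same walk as the patched next_desc(), minus the barrier and the
 * out-of-range errx(). */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;	/* end of chain */
	return desc[i].next;
}

int main(void)
{
	/* A chain through a three-entry table: 0 -> 2 -> 1. */
	struct vring_desc table[] = {
		{ 0x1000, 64, VRING_DESC_F_NEXT, 2 },
		{ 0x3000, 16, 0, 0 },
		{ 0x2000, 32, VRING_DESC_F_NEXT, 1 },
	};
	unsigned int i = 0, max = 3;

	do
		printf("desc %u: addr %#lx, len %u\n", i,
		       (unsigned long)table[i].addr, table[i].len);
	while ((i = next_desc(table, i, max)) != max);
	return 0;
}
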
@@ -585,7 +586,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 				 struct iovec iov[],
 				 unsigned int *out_num, unsigned int *in_num)
 {
-	unsigned int i, head;
+	unsigned int i, head, max;
+	struct vring_desc *desc;
 	u16 last_avail = lg_last_avail(vq);
 
 	while (last_avail == vq->vring.avail->idx) {
@@ -630,15 +632,28 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 	/* When we start there are none of either input nor output. */
 	*out_num = *in_num = 0;
 
+	max = vq->vring.num;
+	desc = vq->vring.desc;
 	i = head;
+
+	/* If this is an indirect entry, then this buffer contains a descriptor
+	 * table which we handle as if it's any normal descriptor chain. */
+	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+		if (desc[i].len % sizeof(struct vring_desc))
+			errx(1, "Invalid size for indirect buffer table");
+
+		max = desc[i].len / sizeof(struct vring_desc);
+		desc = check_pointer(desc[i].addr, desc[i].len);
+		i = 0;
+	}
+
 	do {
 		/* Grab the first descriptor, and check it's OK. */
-		iov[*out_num + *in_num].iov_len = vq->vring.desc[i].len;
+		iov[*out_num + *in_num].iov_len = desc[i].len;
 		iov[*out_num + *in_num].iov_base
-			= check_pointer(vq->vring.desc[i].addr,
-					vq->vring.desc[i].len);
+			= check_pointer(desc[i].addr, desc[i].len);
 		/* If this is an input descriptor, increment that count. */
-		if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE)
+		if (desc[i].flags & VRING_DESC_F_WRITE)
 			(*in_num)++;
 		else {
 			/* If it's an output descriptor, they're all supposed
@@ -649,9 +664,9 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 		}
 
 		/* If we've got too many, that implies a descriptor loop. */
-		if (*out_num + *in_num > vq->vring.num)
+		if (*out_num + *in_num > max)
 			errx(1, "Looped descriptor");
-	} while ((i = next_desc(vq, i)) != vq->vring.num);
+	} while ((i = next_desc(desc, i, max)) != max);
 
 	return head;
 }
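
For readers new to indirect entries: VIRTIO_RING_F_INDIRECT_DESC lets a
driver spend one ring slot on an arbitrarily long chain by pointing that
slot at a separate table of descriptors. That is why the code above
re-points desc and max, restarts at i = 0, and insists the length be a
whole number of struct vring_desc. A compilable sketch of the driver-side
layout (not from this patch; the addresses and the header/payload/status
shape are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VRING_DESC_F_NEXT	1
#define VRING_DESC_F_WRITE	2
#define VRING_DESC_F_INDIRECT	4

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

int main(void)
{
	/* A three-buffer request: a header and a payload the device
	 * reads, plus one status byte the device writes back. */
	struct vring_desc *table = calloc(3, sizeof(*table));

	table[0] = (struct vring_desc){ 0x10000, 16, VRING_DESC_F_NEXT, 1 };
	table[1] = (struct vring_desc){ 0x20000, 4096, VRING_DESC_F_NEXT, 2 };
	table[2] = (struct vring_desc){ 0x30000, 1, VRING_DESC_F_WRITE, 0 };

	/* The single slot consumed in the main ring.  Its len must be a
	 * multiple of sizeof(struct vring_desc): exactly the host-side
	 * sanity check in wait_for_vq_desc() above. */
	struct vring_desc ring_slot = {
		.addr	= (uint64_t)(uintptr_t)table,
		.len	= 3 * sizeof(struct vring_desc),
		.flags	= VRING_DESC_F_INDIRECT,
	};

	printf("one ring slot covers %zu descriptors\n",
	       ring_slot.len / sizeof(struct vring_desc));
	free(table);
	return 0;
}

Note the device must still accept plain chains: a driver may only queue
indirect entries once the feature bit is negotiated, and may freely mix
the two styles afterwards.
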
@@ -1331,6 +1346,8 @@ static void setup_tun_net(char *arg)
 	add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
 	add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
 	add_feature(dev, VIRTIO_NET_F_HOST_ECN);
+	/* We handle indirect ring entries */
+	add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
 	set_config(dev, sizeof(conf), &conf);
 
 	/* We don't need the socket any more; setup is done. */
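
Advertising the feature here is what allows guest drivers to use indirect
entries at all; a driver must not place one on the ring unless both sides
negotiated the bit. A sketch of the driver-side gate (has_feature() is a
stand-in, not a real lguest or virtio helper; bit 28 is
VIRTIO_RING_F_INDIRECT_DESC per the virtio ring specification):

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_RING_F_INDIRECT_DESC 28	/* ring feature bit, per spec */

/* Stand-in for however a driver stores its negotiated feature bits. */
static int has_feature(uint64_t negotiated, unsigned int bit)
{
	return (negotiated >> bit) & 1;
}

int main(void)
{
	uint64_t negotiated = 1ULL << VIRTIO_RING_F_INDIRECT_DESC;

	if (has_feature(negotiated, VIRTIO_RING_F_INDIRECT_DESC))
		printf("driver may queue indirect descriptor tables\n");
	else
		printf("driver must stick to direct chains\n");
	return 0;
}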