|
@@ -108,6 +108,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * __vb2_plane_dmabuf_put() - release memory associated with
|
|
|
+ * a DMABUF shared plane
|
|
|
+ */
|
|
|
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
|
|
|
+{
|
|
|
+ if (!p->mem_priv)
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (p->dbuf_mapped)
|
|
|
+ call_memop(q, unmap_dmabuf, p->mem_priv);
|
|
|
+
|
|
|
+ call_memop(q, detach_dmabuf, p->mem_priv);
|
|
|
+ dma_buf_put(p->dbuf);
|
|
|
+ memset(p, 0, sizeof(*p));
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * __vb2_buf_dmabuf_put() - release memory associated with
|
|
|
+ * a DMABUF shared buffer
|
|
|
+ */
|
|
|
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
|
|
|
+{
|
|
|
+ struct vb2_queue *q = vb->vb2_queue;
|
|
|
+ unsigned int plane;
|
|
|
+
|
|
|
+ for (plane = 0; plane < vb->num_planes; ++plane)
|
|
|
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
|
|
|
* every buffer on the queue
|
|
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
|
|
|
/* Free MMAP buffers or release USERPTR buffers */
|
|
|
if (q->memory == V4L2_MEMORY_MMAP)
|
|
|
__vb2_buf_mem_free(vb);
|
|
|
+ else if (q->memory == V4L2_MEMORY_DMABUF)
|
|
|
+ __vb2_buf_dmabuf_put(vb);
|
|
|
else
|
|
|
__vb2_buf_userptr_put(vb);
|
|
|
}
|
|
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
|
|
|
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
|
|
|
else if (q->memory == V4L2_MEMORY_USERPTR)
|
|
|
b->m.userptr = vb->v4l2_planes[0].m.userptr;
|
|
|
+ else if (q->memory == V4L2_MEMORY_DMABUF)
|
|
|
+ b->m.fd = vb->v4l2_planes[0].m.fd;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -453,6 +487,20 @@ static int __verify_mmap_ops(struct vb2_queue *q)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * __verify_dmabuf_ops() - verify that all memory operations required for
|
|
|
+ * DMABUF queue type have been provided
|
|
|
+ */
|
|
|
+static int __verify_dmabuf_ops(struct vb2_queue *q)
|
|
|
+{
|
|
|
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
|
|
|
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
|
|
|
+ !q->mem_ops->unmap_dmabuf)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* __verify_memory_type() - Check whether the memory type and buffer type
|
|
|
* passed to a buffer operation are compatible with the queue.
|
|
@@ -460,7 +508,8 @@ static int __verify_mmap_ops(struct vb2_queue *q)
|
|
|
static int __verify_memory_type(struct vb2_queue *q,
|
|
|
enum v4l2_memory memory, enum v4l2_buf_type type)
|
|
|
{
|
|
|
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
|
|
|
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
|
|
|
+ memory != V4L2_MEMORY_DMABUF) {
|
|
|
dprintk(1, "reqbufs: unsupported memory type\n");
|
|
|
return -EINVAL;
|
|
|
}
|
|
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
|
|
|
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
/*
|
|
|
* Place the busy tests at the end: -EBUSY can be ignored when
|
|
|
* create_bufs is called with count == 0, but count == 0 should still
|
|
@@ -845,6 +899,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
|
|
|
b->m.planes[plane].length;
|
|
|
}
|
|
|
}
|
|
|
+ if (b->memory == V4L2_MEMORY_DMABUF) {
|
|
|
+ for (plane = 0; plane < vb->num_planes; ++plane) {
|
|
|
+ v4l2_planes[plane].m.fd =
|
|
|
+ b->m.planes[plane].m.fd;
|
|
|
+ v4l2_planes[plane].length =
|
|
|
+ b->m.planes[plane].length;
|
|
|
+ v4l2_planes[plane].data_offset =
|
|
|
+ b->m.planes[plane].data_offset;
|
|
|
+ }
|
|
|
+ }
|
|
|
} else {
|
|
|
/*
|
|
|
* Single-planar buffers do not use planes array,
|
|
@@ -859,6 +923,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
|
|
|
v4l2_planes[0].m.userptr = b->m.userptr;
|
|
|
v4l2_planes[0].length = b->length;
|
|
|
}
|
|
|
+
|
|
|
+ if (b->memory == V4L2_MEMORY_DMABUF) {
|
|
|
+ v4l2_planes[0].m.fd = b->m.fd;
|
|
|
+ v4l2_planes[0].length = b->length;
|
|
|
+ v4l2_planes[0].data_offset = 0;
|
|
|
+ }
|
|
|
+
|
|
|
}
|
|
|
|
|
|
vb->v4l2_buf.field = b->field;
|
|
@@ -958,6 +1029,107 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
|
|
|
+ */
|
|
|
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
|
|
|
+{
|
|
|
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
|
|
|
+ struct vb2_queue *q = vb->vb2_queue;
|
|
|
+ void *mem_priv;
|
|
|
+ unsigned int plane;
|
|
|
+ int ret;
|
|
|
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
|
|
|
+
|
|
|
+ /* Verify and copy relevant information provided by the userspace */
|
|
|
+ __fill_vb2_buffer(vb, b, planes);
|
|
|
+
|
|
|
+ for (plane = 0; plane < vb->num_planes; ++plane) {
|
|
|
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
|
|
|
+
|
|
|
+ if (IS_ERR_OR_NULL(dbuf)) {
|
|
|
+ dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
|
|
|
+ plane);
|
|
|
+ ret = -EINVAL;
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* use DMABUF size if length is not provided */
|
|
|
+ if (planes[plane].length == 0)
|
|
|
+ planes[plane].length = dbuf->size;
|
|
|
+
|
|
|
+ if (planes[plane].length < planes[plane].data_offset +
|
|
|
+ q->plane_sizes[plane]) {
|
|
|
+ ret = -EINVAL;
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Skip the plane if already verified */
|
|
|
+ if (dbuf == vb->planes[plane].dbuf &&
|
|
|
+ vb->v4l2_planes[plane].length == planes[plane].length) {
|
|
|
+ dma_buf_put(dbuf);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
|
|
|
+
|
|
|
+ /* Release previously acquired memory if present */
|
|
|
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
|
|
|
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
|
|
|
+
|
|
|
+ /* Acquire each plane's memory */
|
|
|
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
|
|
|
+ dbuf, planes[plane].length, write);
|
|
|
+ if (IS_ERR(mem_priv)) {
|
|
|
+ dprintk(1, "qbuf: failed to attach dmabuf\n");
|
|
|
+ ret = PTR_ERR(mem_priv);
|
|
|
+ dma_buf_put(dbuf);
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ vb->planes[plane].dbuf = dbuf;
|
|
|
+ vb->planes[plane].mem_priv = mem_priv;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment().. but
|
|
|
+ * really we want to do this just before the DMA, not while queueing
|
|
|
+ * the buffer(s)..
|
|
|
+ */
|
|
|
+ for (plane = 0; plane < vb->num_planes; ++plane) {
|
|
|
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
|
|
|
+ if (ret) {
|
|
|
+ dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
|
|
|
+ plane);
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+ vb->planes[plane].dbuf_mapped = 1;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Call driver-specific initialization on the newly acquired buffer,
|
|
|
+ * if provided.
|
|
|
+ */
|
|
|
+ ret = call_qop(q, buf_init, vb);
|
|
|
+ if (ret) {
|
|
|
+ dprintk(1, "qbuf: buffer initialization failed\n");
|
|
|
+ goto err;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Now that everything is in order, copy relevant information
|
|
|
+ * provided by userspace.
|
|
|
+ */
|
|
|
+ for (plane = 0; plane < vb->num_planes; ++plane)
|
|
|
+ vb->v4l2_planes[plane] = planes[plane];
|
|
|
+
|
|
|
+ return 0;
|
|
|
+err:
|
|
|
+ /* In case of errors, release planes that were already acquired */
|
|
|
+ __vb2_buf_dmabuf_put(vb);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
|
|
|
*/
|
|
@@ -982,6 +1154,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
|
|
|
case V4L2_MEMORY_USERPTR:
|
|
|
ret = __qbuf_userptr(vb, b);
|
|
|
break;
|
|
|
+ case V4L2_MEMORY_DMABUF:
|
|
|
+ ret = __qbuf_dmabuf(vb, b);
|
|
|
+ break;
|
|
|
default:
|
|
|
WARN(1, "Invalid queue type\n");
|
|
|
ret = -EINVAL;
|
|
@@ -1302,6 +1477,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
|
|
|
|
|
|
+/**
|
|
|
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
|
|
|
+ */
|
|
|
+static void __vb2_dqbuf(struct vb2_buffer *vb)
|
|
|
+{
|
|
|
+ struct vb2_queue *q = vb->vb2_queue;
|
|
|
+ unsigned int i;
|
|
|
+
|
|
|
+ /* nothing to do if the buffer is already dequeued */
|
|
|
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
|
|
|
+ return;
|
|
|
+
|
|
|
+ vb->state = VB2_BUF_STATE_DEQUEUED;
|
|
|
+
|
|
|
+ /* unmap DMABUF buffer */
|
|
|
+ if (q->memory == V4L2_MEMORY_DMABUF)
|
|
|
+ for (i = 0; i < vb->num_planes; ++i) {
|
|
|
+ if (!vb->planes[i].dbuf_mapped)
|
|
|
+ continue;
|
|
|
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
|
|
|
+ vb->planes[i].dbuf_mapped = 0;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* vb2_dqbuf() - Dequeue a buffer to the userspace
|
|
|
* @q: videobuf2 queue
|
|
@@ -1363,11 +1562,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
|
|
|
__fill_v4l2_buffer(vb, b);
|
|
|
/* Remove from videobuf queue */
|
|
|
list_del(&vb->queued_entry);
|
|
|
+ /* go back to dequeued state */
|
|
|
+ __vb2_dqbuf(vb);
|
|
|
|
|
|
dprintk(1, "dqbuf of buffer %d, with state %d\n",
|
|
|
vb->v4l2_buf.index, vb->state);
|
|
|
|
|
|
- vb->state = VB2_BUF_STATE_DEQUEUED;
|
|
|
return 0;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(vb2_dqbuf);
|
|
@@ -1406,7 +1606,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
|
|
|
* Reinitialize all buffers for next use.
|
|
|
*/
|
|
|
for (i = 0; i < q->num_buffers; ++i)
|
|
|
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
|
|
|
+ __vb2_dqbuf(q->bufs[i]);
|
|
|
}
|
|
|
|
|
|
/**
|