@@ -17,7 +17,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#include <media/videobuf-core.h>
+#include <media/videobuf2-core.h>
 #include <media/v4l2-mem2mem.h>
 
 MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
@@ -65,21 +65,16 @@ struct v4l2_m2m_dev {
 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
 						enum v4l2_buf_type type)
 {
-	switch (type) {
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		return &m2m_ctx->cap_q_ctx;
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+	if (V4L2_TYPE_IS_OUTPUT(type))
 		return &m2m_ctx->out_q_ctx;
-	default:
-		printk(KERN_ERR "Invalid buffer type\n");
-		return NULL;
-	}
+	else
+		return &m2m_ctx->cap_q_ctx;
 }
 
 /**
- * v4l2_m2m_get_vq() - return videobuf_queue for the given type
+ * v4l2_m2m_get_vq() - return vb2_queue for the given type
  */
-struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
 				       enum v4l2_buf_type type)
 {
 	struct v4l2_m2m_queue_ctx *q_ctx;
@@ -95,27 +90,20 @@ EXPORT_SYMBOL(v4l2_m2m_get_vq);
 /**
  * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
  */
-void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
 {
-	struct v4l2_m2m_queue_ctx *q_ctx;
-	struct videobuf_buffer *vb = NULL;
+	struct v4l2_m2m_buffer *b = NULL;
 	unsigned long flags;
 
-	q_ctx = get_queue_ctx(m2m_ctx, type);
-	if (!q_ctx)
-		return NULL;
-
-	spin_lock_irqsave(q_ctx->q.irqlock, flags);
+	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 
 	if (list_empty(&q_ctx->rdy_queue))
 		goto end;
 
-	vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
-	vb->state = VIDEOBUF_ACTIVE;
-
+	b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
 end:
-	spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
-	return vb;
+	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+	return &b->vb;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
 
@@ -123,26 +111,21 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
  * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
  * return it
  */
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
 {
-	struct v4l2_m2m_queue_ctx *q_ctx;
-	struct videobuf_buffer *vb = NULL;
+	struct v4l2_m2m_buffer *b = NULL;
 	unsigned long flags;
 
-	q_ctx = get_queue_ctx(m2m_ctx, type);
-	if (!q_ctx)
-		return NULL;
-
-	spin_lock_irqsave(q_ctx->q.irqlock, flags);
+	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 	if (!list_empty(&q_ctx->rdy_queue)) {
-		vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
-				queue);
-		list_del(&vb->queue);
+		b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer,
+				list);
+		list_del(&b->list);
 		q_ctx->num_rdy--;
 	}
-	spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 
-	return vb;
+	return &b->vb;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
 
@@ -235,20 +218,20 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 		return;
 	}
 
-	spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
+	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
 	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No input buffers available\n");
 		return;
 	}
 	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No output buffers available\n");
 		return;
 	}
-	spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
 
 	if (m2m_dev->m2m_ops->job_ready
 	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -291,6 +274,7 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 
 	list_del(&m2m_dev->curr_ctx->queue);
 	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+	wake_up(&m2m_dev->curr_ctx->finished);
 	m2m_dev->curr_ctx = NULL;
 
 	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
@@ -309,10 +293,10 @@ EXPORT_SYMBOL(v4l2_m2m_job_finish);
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs)
 {
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
-	return videobuf_reqbufs(vq, reqbufs);
+	return vb2_reqbufs(vq, reqbufs);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
 
@@ -324,15 +308,22 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      struct v4l2_buffer *buf)
 {
-	struct videobuf_queue *vq;
-	int ret;
+	struct vb2_queue *vq;
+	int ret = 0;
+	unsigned int i;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	ret = videobuf_querybuf(vq, buf);
-
-	if (buf->memory == V4L2_MEMORY_MMAP
-	    && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		buf->m.offset += DST_QUEUE_OFF_BASE;
+	ret = vb2_querybuf(vq, buf);
+
+	/* Adjust MMAP memory offsets for the CAPTURE queue */
+	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+			for (i = 0; i < buf->length; ++i)
+				buf->m.planes[i].m.mem_offset
+					+= DST_QUEUE_OFF_BASE;
+		} else {
+			buf->m.offset += DST_QUEUE_OFF_BASE;
+		}
 	}
 
 	return ret;
@@ -346,11 +337,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		  struct v4l2_buffer *buf)
 {
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	ret = videobuf_qbuf(vq, buf);
+	ret = vb2_qbuf(vq, buf);
 	if (!ret)
 		v4l2_m2m_try_schedule(m2m_ctx);
 
@@ -365,10 +356,10 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_buffer *buf)
 {
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
@@ -378,11 +369,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      enum v4l2_buf_type type)
 {
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, type);
-	ret = videobuf_streamon(vq);
+	ret = vb2_streamon(vq, type);
 	if (!ret)
 		v4l2_m2m_try_schedule(m2m_ctx);
 
@@ -396,10 +387,10 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		       enum v4l2_buf_type type)
 {
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, type);
-	return videobuf_streamoff(vq);
+	return vb2_streamoff(vq, type);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
 
@@ -414,44 +405,53 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			   struct poll_table_struct *wait)
 {
-	struct videobuf_queue *src_q, *dst_q;
-	struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
+	struct vb2_queue *src_q, *dst_q;
+	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
 	unsigned int rc = 0;
+	unsigned long flags;
 
 	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
 	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
 
-	videobuf_queue_lock(src_q);
-	videobuf_queue_lock(dst_q);
-
-	if (src_q->streaming && !list_empty(&src_q->stream))
-		src_vb = list_first_entry(&src_q->stream,
-					  struct videobuf_buffer, stream);
-	if (dst_q->streaming && !list_empty(&dst_q->stream))
-		dst_vb = list_first_entry(&dst_q->stream,
-					  struct videobuf_buffer, stream);
-
-	if (!src_vb && !dst_vb) {
+	/*
+	 * There has to be at least one buffer queued on each queued_list, which
+	 * means either in driver already or waiting for driver to claim it
+	 * and start processing.
+	 */
+	if ((!src_q->streaming || list_empty(&src_q->queued_list))
+		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
 		rc = POLLERR;
 		goto end;
 	}
 
-	if (src_vb) {
-		poll_wait(file, &src_vb->done, wait);
-		if (src_vb->state == VIDEOBUF_DONE
-				|| src_vb->state == VIDEOBUF_ERROR)
-			rc |= POLLOUT | POLLWRNORM;
-	}
-	if (dst_vb) {
-		poll_wait(file, &dst_vb->done, wait);
-		if (dst_vb->state == VIDEOBUF_DONE
-				|| dst_vb->state == VIDEOBUF_ERROR)
-			rc |= POLLIN | POLLRDNORM;
-	}
+	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
+		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+
+	poll_wait(file, &src_q->done_wq, wait);
+	poll_wait(file, &dst_q->done_wq, wait);
+
+	if (m2m_ctx->m2m_dev->m2m_ops->lock)
+		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+
+	spin_lock_irqsave(&src_q->done_lock, flags);
+	if (!list_empty(&src_q->done_list))
+		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+						done_entry);
+	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+			|| src_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&src_q->done_lock, flags);
+
+	spin_lock_irqsave(&dst_q->done_lock, flags);
+	if (!list_empty(&dst_q->done_list))
+		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+						done_entry);
+	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+			|| dst_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&dst_q->done_lock, flags);
 
 end:
-	videobuf_queue_unlock(dst_q);
-	videobuf_queue_unlock(src_q);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
@@ -470,7 +470,7 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct vm_area_struct *vma)
 {
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-	struct videobuf_queue *vq;
+	struct vb2_queue *vq;
 
 	if (offset < DST_QUEUE_OFF_BASE) {
 		vq = v4l2_m2m_get_src_vq(m2m_ctx);
@@ -479,7 +479,7 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
 	}
 
-	return videobuf_mmap_mapper(vq, vma);
+	return vb2_mmap(vq, vma);
 }
 EXPORT_SYMBOL(v4l2_m2m_mmap);
 
@@ -531,36 +531,41 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
  *
  * Usually called from driver's open() function.
  */
-struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
-			void (*vq_init)(void *priv, struct videobuf_queue *,
-					enum v4l2_buf_type))
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+		void *drv_priv,
+		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
 {
 	struct v4l2_m2m_ctx *m2m_ctx;
 	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
-
-	if (!vq_init)
-		return ERR_PTR(-EINVAL);
+	int ret;
 
 	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
 	if (!m2m_ctx)
 		return ERR_PTR(-ENOMEM);
 
-	m2m_ctx->priv = priv;
+	m2m_ctx->priv = drv_priv;
 	m2m_ctx->m2m_dev = m2m_dev;
+	init_waitqueue_head(&m2m_ctx->finished);
 
-	out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-	cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	out_q_ctx = &m2m_ctx->out_q_ctx;
+	cap_q_ctx = &m2m_ctx->cap_q_ctx;
 
 	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
 	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+	spin_lock_init(&out_q_ctx->rdy_spinlock);
+	spin_lock_init(&cap_q_ctx->rdy_spinlock);
 
 	INIT_LIST_HEAD(&m2m_ctx->queue);
 
-	vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-	vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
-	out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;
+	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
+
+	if (ret)
+		goto err;
 
 	return m2m_ctx;
+err:
+	kfree(m2m_ctx);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
 
@@ -572,7 +577,6 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
-	struct videobuf_buffer *vb;
 	unsigned long flags;
 
 	m2m_dev = m2m_ctx->m2m_dev;
@@ -582,10 +586,7 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
 		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
-		vb = v4l2_m2m_next_dst_buf(m2m_ctx);
-		BUG_ON(NULL == vb);
-		wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
-				&& vb->state != VIDEOBUF_QUEUED);
+		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
 	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
 		list_del(&m2m_ctx->queue);
 		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
@@ -597,11 +598,8 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 	}
 
-	videobuf_stop(&m2m_ctx->cap_q_ctx.q);
-	videobuf_stop(&m2m_ctx->out_q_ctx.q);
-
-	videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
-	videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);
+	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
+	vb2_queue_release(&m2m_ctx->out_q_ctx.q);
 
 	kfree(m2m_ctx);
 }
@@ -611,23 +609,21 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from buf_queue(), videobuf_queue_ops callback.
- *
- * Locking: Caller holds q->irqlock (taken by videobuf before calling buf_queue
- * callback in the driver).
 */
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
-			struct videobuf_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
 {
+	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
 	struct v4l2_m2m_queue_ctx *q_ctx;
+	unsigned long flags;
 
-	q_ctx = get_queue_ctx(m2m_ctx, vq->type);
+	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
 	if (!q_ctx)
 		return;
 
-	list_add_tail(&vb->queue, &q_ctx->rdy_queue);
+	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+	list_add_tail(&b->list, &q_ctx->rdy_queue);
 	q_ctx->num_rdy++;
-
-	vb->state = VIDEOBUF_QUEUED;
+	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
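
Usage sketch (editor's addition, not part of the patch above): the reworked v4l2_m2m_ctx_init() now takes the m2m device first, the driver's private pointer second, and a queue_init() callback that configures both vb2 queues, while v4l2_m2m_buf_queue() is called from the driver's vb2 buf_queue op with just the vb2_buffer. The code below is a minimal, hedged illustration of that wiring; the my_* names, the vmalloc allocator and the single-op vb2_ops are assumptions for the example only, and a real driver also implements queue_setup() and the remaining vb2 callbacks.

#include <linux/err.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

struct my_ctx {
	struct v4l2_m2m_ctx *m2m_ctx;
};

/* vb2 replaces the old buf_queue(vq, vb) driver callback with buf_queue(vb). */
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}

static const struct vb2_ops my_vb2_ops = {
	/* A real driver also fills in queue_setup, buf_prepare, etc. */
	.buf_queue	= my_buf_queue,
};

/* The new queue_init() callback sets up both vb2 queues in one call. */
static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP;
	src_vq->drv_priv = ctx;
	/* Buffers must embed struct v4l2_m2m_buffer so they fit the rdy_queue list. */
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &my_vb2_ops;
	src_vq->mem_ops = &vb2_vmalloc_memops;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &my_vb2_ops;
	dst_vq->mem_ops = &vb2_vmalloc_memops;

	return vb2_queue_init(dst_vq);
}

/* From the driver's open(): note the new argument order, device first, drv_priv second. */
static int my_open_m2m(struct v4l2_m2m_dev *m2m_dev, struct my_ctx *ctx)
{
	ctx->m2m_ctx = v4l2_m2m_ctx_init(m2m_dev, ctx, my_queue_init);
	if (IS_ERR(ctx->m2m_ctx))
		return PTR_ERR(ctx->m2m_ctx);

	return 0;
}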