|
@@ -265,6 +265,39 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
|
|
|
v4l2_m2m_try_run(m2m_dev);
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
|
|
|
+ * @m2m_ctx: m2m context with pending/running jobs to be canceled
|
|
|
+ * In case of streamoff or release called on any context,
|
|
|
+ * 1] If the context is currently running, then abort job will be called
|
|
|
+ * 2] If the context is queued, then the context will be removed from
|
|
|
+ * the job_queue
|
|
|
+ */
|
|
|
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
|
|
|
+{
|
|
|
+ struct v4l2_m2m_dev *m2m_dev;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ m2m_dev = m2m_ctx->m2m_dev;
|
|
|
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
|
|
|
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
|
|
|
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
|
|
|
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
|
|
|
+ wait_event(m2m_ctx->finished,
|
|
|
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
|
|
|
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
|
|
|
+ list_del(&m2m_ctx->queue);
|
|
|
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
|
|
|
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
|
|
|
+ m2m_ctx);
|
|
|
+ } else {
|
|
|
+ /* Do nothing, was not on queue/running */
|
|
|
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* v4l2_m2m_job_finish() - inform the framework that a job has been finished
|
|
|
* and have it clean up
|
|
@@ -436,6 +469,9 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
|
|
|
unsigned long flags_job, flags;
|
|
|
int ret;
|
|
|
|
|
|
+ /* wait until the current context is dequeued from job_queue */
|
|
|
+ v4l2_m2m_cancel_job(m2m_ctx);
|
|
|
+
|
|
|
q_ctx = get_queue_ctx(m2m_ctx, type);
|
|
|
ret = vb2_streamoff(&q_ctx->q, type);
|
|
|
if (ret)
|
|
@@ -658,27 +694,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
|
|
|
*/
|
|
|
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
|
|
|
{
|
|
|
- struct v4l2_m2m_dev *m2m_dev;
|
|
|
- unsigned long flags;
|
|
|
-
|
|
|
- m2m_dev = m2m_ctx->m2m_dev;
|
|
|
-
|
|
|
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
|
|
|
- if (m2m_ctx->job_flags & TRANS_RUNNING) {
|
|
|
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
|
|
|
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
|
|
|
- wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
|
|
|
- } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
|
|
|
- list_del(&m2m_ctx->queue);
|
|
|
- m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
|
|
|
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
- dprintk("m2m_ctx: %p had been on queue and was removed\n",
|
|
|
- m2m_ctx);
|
|
|
- } else {
|
|
|
- /* Do nothing, was not on queue/running */
|
|
|
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
|
|
|
- }
|
|
|
+ /* wait until the current context is dequeued from job_queue */
|
|
|
+ v4l2_m2m_cancel_job(m2m_ctx);
|
|
|
|
|
|
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
|
|
|
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
|