v4l2-mem2mem.c

/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);

	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
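
/*
 * Example (not part of this file): a hypothetical driver's device_run()
 * fetching the next ready buffers via the v4l2_m2m_next_src_buf() and
 * v4l2_m2m_next_dst_buf() wrappers from v4l2-mem2mem.h, which call
 * v4l2_m2m_next_buf() on the respective queue context. All my_* names are
 * assumptions for illustration only.
 */
#if 0
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;	/* hypothetical per-instance data */
	struct vb2_buffer *src_buf, *dst_buf;

	/* Peek at the next ready source and destination buffers. */
	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

	/* Program the hardware with both buffers and kick it off. */
	my_hw_start(ctx, src_buf, dst_buf);	/* hypothetical */
}
#endif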

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; a sketch of such a callback follows this
 * function.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
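
/*
 * Example (not part of this file): a hypothetical job_ready() callback for
 * hardware that consumes two source buffers per transaction, using the
 * v4l2_m2m_num_src_bufs_ready() and v4l2_m2m_num_dst_bufs_ready() helpers
 * from v4l2-mem2mem.h. The my_ctx structure is an assumption for
 * illustration only.
 */
#if 0
static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;	/* hypothetical per-instance data */

	/* Ready only once two source buffers and one destination buffer
	 * have been queued. */
	return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2
	    && v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= 1;
}
#endif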

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function must not be called until the driver's device_run() callback
 * has been invoked. To prevent recursion, however, it should not be called
 * directly from the device_run() callback itself.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
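
/*
 * Example (not part of this file): a hypothetical interrupt handler calling
 * v4l2_m2m_job_finish() once the hardware signals completion, after removing
 * the processed buffers from the ready lists and returning them to vb2. All
 * my_* names are assumptions for illustration only.
 */
#if 0
static irqreturn_t my_irq_handler(int irq, void *priv)
{
	struct my_dev *dev = priv;	/* hypothetical per-device data */
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct vb2_buffer *src_buf, *dst_buf;

	/* Take the finished buffers off the ready lists and complete them. */
	src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	vb2_buffer_done(src_buf, VB2_BUF_STATE_DONE);
	vb2_buffer_done(dst_buf, VB2_BUF_STATE_DONE);

	/* Yield the device so the next queued job can be scheduled. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);

	return IRQ_HANDLED;
}
#endif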

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	INIT_LIST_HEAD(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
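
/*
 * Example (not part of this file): hypothetical driver ioctl handlers that
 * simply delegate to the multiplexers above. The my_ctx structure and
 * fh_to_ctx() helper are assumptions for illustration only.
 */
#if 0
static int my_vidioc_reqbufs(struct file *file, void *priv,
			     struct v4l2_requestbuffers *reqbufs)
{
	struct my_ctx *ctx = fh_to_ctx(priv);	/* hypothetical */

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int my_vidioc_streamon(struct file *file, void *priv,
			      enum v4l2_buf_type type)
{
	struct my_ctx *ctx = fh_to_ctx(priv);	/* hypothetical */

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
#endif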

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Polls both queues: if a buffer is
 * available to dequeue (with dqbuf) from the source queue, a non-blocking
 * write is signalled (POLLOUT | POLLWRNORM); if one is available on the
 * destination queue, a non-blocking read is signalled (POLLIN | POLLRDNORM).
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list))
		poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
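
/*
 * Example (not part of this file): hypothetical file operations delegating
 * poll() and mmap() to the helpers above. The my_ctx structure and the
 * file-to-context lookup are assumptions for illustration only.
 */
#if 0
static unsigned int my_poll(struct file *file, struct poll_table_struct *wait)
{
	struct my_ctx *ctx = file->private_data;	/* hypothetical */

	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ctx *ctx = file->private_data;	/* hypothetical */

	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
#endif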

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
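
/*
 * Example (not part of this file): a hypothetical probe() registering the
 * device_run and job_abort callbacks, which v4l2_m2m_init() requires. All
 * my_* names are assumptions for illustration only.
 */
#if 0
static const struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,	/* hypothetical, mandatory */
	.job_abort	= my_job_abort,		/* hypothetical, mandatory */
	.job_ready	= my_job_ready,		/* hypothetical, optional */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dev = my_dev_alloc(pdev);	/* hypothetical */

	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
	if (IS_ERR(dev->m2m_dev))
		return PTR_ERR(dev->m2m_dev);

	return 0;
}
#endif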

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function to
 * be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
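
/*
 * Example (not part of this file): a hypothetical open() creating an m2m
 * context with a queue_init callback that configures both vb2 queues. Note
 * that buf_struct_size must be at least sizeof(struct v4l2_m2m_buffer) so
 * the ready-list entry embedded in it fits. All my_* names are assumptions
 * for illustration only.
 */
#if 0
static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP;
	src_vq->drv_priv = priv;
	src_vq->ops = &my_qops;			/* hypothetical vb2_ops */
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP;
	dst_vq->drv_priv = priv;
	dst_vq->ops = &my_qops;			/* hypothetical vb2_ops */
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);

	return vb2_queue_init(dst_vq);
}

static int my_open(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);
	struct my_ctx *ctx = my_ctx_alloc(dev);		/* hypothetical */

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
	if (IS_ERR(ctx->m2m_ctx))
		return PTR_ERR(ctx->m2m_ctx);

	return 0;
}
#endif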

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2_ops buf_queue() callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
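
/*
 * Example (not part of this file): a hypothetical vb2 buf_queue callback
 * handing the buffer over to the framework's ready list. The my_ctx
 * structure is an assumption for illustration only.
 */
#if 0
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
#endif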