videobuf-core.c

/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)

struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

/*
 * Note: despite the name, this helper returns true when the buffer is
 * *neither* active nor queued, i.e. when the driver is done with it.
 */
static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}

int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (is_state_active_or_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
	else
		wait_event(vb->done, is_state_active_or_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

void videobuf_queue_core_init(struct videobuf_queue *q,
			      const struct videobuf_queue_ops *ops,
			      struct device *dev,
			      spinlock_t *irqlock,
			      enum v4l2_buf_type type,
			      enum v4l2_field field,
			      unsigned int msize,
			      void *priv,
			      struct videobuf_qtype_ops *int_ops,
			      struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
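
/*
 * Illustrative sketch (not part of the original file): drivers normally do
 * not call videobuf_queue_core_init() directly; they call the init helper of
 * the memory backend they use (e.g. videobuf_queue_sg_init() or
 * videobuf_queue_vmalloc_init()), which supplies int_ops.  The names
 * mydrv_dev, mydrv_buffer and mydrv_video_qops below are hypothetical:
 *
 *	videobuf_queue_sg_init(&dev->vidq, &mydrv_video_qops,
 *			       &dev->pci->dev, &dev->slock,
 *			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *			       V4L2_FIELD_INTERLACED,
 *			       sizeof(struct mydrv_buffer), dev, &dev->lock);
 */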

/* Locking: Only used in bttv; unsafe, find a way to remove. */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/**
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field   = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field   = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;
	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset = vb->boff;
		b->length   = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset = vb->boff;
		break;
	}

	b->flags = 0;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	if (vb->input != UNSET) {
		b->flags |= V4L2_BUF_FLAG_INPUT;
		b->input = vb->input;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}

int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			  unsigned int bcount, unsigned int bsize,
			  enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->input  = UNSET;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

int videobuf_reqbufs(struct videobuf_queue *q,
		     struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->count < 1) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		return -EINVAL;
	}

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
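
/*
 * Illustrative sketch (not part of the original file): drivers usually wire
 * videobuf_reqbufs() and videobuf_querybuf() straight into their
 * VIDIOC_REQBUFS/VIDIOC_QUERYBUF ioctl handlers.  mydrv_fh is a hypothetical
 * per-open structure that holds the videobuf_queue:
 *
 *	static int mydrv_reqbufs(struct file *file, void *priv,
 *				 struct v4l2_requestbuffers *p)
 *	{
 *		struct mydrv_fh *fh = priv;
 *
 *		return videobuf_reqbufs(&fh->vb_vidq, p);
 *	}
 */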

int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);

int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	if (b->flags & V4L2_BUF_FLAG_INPUT) {
		if (b->input >= q->inputs) {
			dprintk(1, "qbuf: wrong input.\n");
			goto done;
		}
		buf->input = b->input;
	} else {
		buf->input = UNSET;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested "
				   "but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			      struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
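
/*
 * Illustrative sketch (not part of the original file): VIDIOC_QBUF and
 * VIDIOC_DQBUF handlers are usually thin wrappers around the helpers above;
 * the nonblocking flag for dqbuf comes from the file's O_NONBLOCK mode.
 * mydrv_fh is hypothetical:
 *
 *	static int mydrv_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
 *	{
 *		struct mydrv_fh *fh = priv;
 *
 *		return videobuf_qbuf(&fh->vb_vidq, p);
 *	}
 *
 *	static int mydrv_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
 *	{
 *		struct mydrv_fh *fh = priv;
 *
 *		return videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK);
 *	}
 */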

int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);

/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALL(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALL(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
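
/*
 * Illustrative sketch (not part of the original file): the read() file
 * operation of a capture driver typically forwards to videobuf_read_one()
 * (one frame per read) or to videobuf_read_stream().  mydrv_fh is
 * hypothetical:
 *
 *	static ssize_t mydrv_read(struct file *file, char __user *data,
 *				  size_t count, loff_t *ppos)
 *	{
 *		struct mydrv_fh *fh = file->private_data;
 *
 *		return videobuf_read_one(&fh->vb_vidq, data, count, ppos,
 *					 file->f_flags & O_NONBLOCK);
 *	}
 */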

/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);

ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

unsigned int videobuf_poll_stream(struct file *file,
				  struct videobuf_queue *q,
				  poll_table *wait)
{
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
				rc = POLLOUT | POLLWRNORM;
				break;
			default:
				rc = POLLIN | POLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
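
/*
 * Illustrative sketch (not part of the original file): the poll() file
 * operation usually just delegates to videobuf_poll_stream().  mydrv_fh is
 * hypothetical:
 *
 *	static unsigned int mydrv_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydrv_fh *fh = file->private_data;
 *
 *		return videobuf_poll_stream(file, &fh->vb_vidq, wait);
 *	}
 */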

int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
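
/*
 * Illustrative sketch (not part of the original file): the mmap() file
 * operation hands the vma straight to videobuf_mmap_mapper(), which looks up
 * the buffer whose boff matches vma->vm_pgoff and lets the memory-type
 * backend map it.  mydrv_fh is hypothetical:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_fh *fh = file->private_data;
 *
 *		return videobuf_mmap_mapper(&fh->vb_vidq, vma);
 *	}
 */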