  1. /*
  2. * ispqueue.c
  3. *
  4. * TI OMAP3 ISP - Video buffers queue handling
  5. *
  6. * Copyright (C) 2010 Nokia Corporation
  7. *
  8. * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  9. * Sakari Ailus <sakari.ailus@iki.fi>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23. * 02110-1301 USA
  24. */
  25. #include <asm/cacheflush.h>
  26. #include <linux/dma-mapping.h>
  27. #include <linux/mm.h>
  28. #include <linux/pagemap.h>
  29. #include <linux/poll.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/sched.h>
  32. #include <linux/slab.h>
  33. #include <linux/vmalloc.h>
  34. #include "ispqueue.h"
  35. /* -----------------------------------------------------------------------------
  36. * Video buffers management
  37. */
  38. /*
  39. * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
  40. *
  41. * The typical operation required here is Cache Invalidation across
  42. * the (user space) buffer address range. And this _must_ be done
  43. * at QBUF stage (and *only* at QBUF).
  44. *
  45. * We try to use optimal cache invalidation function:
  46. * - dmac_map_area:
  47. * - used when the number of pages are _low_.
  48. * - it becomes quite slow as the number of pages increase.
  49. * - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
  50. * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
  51. *
  52. * - flush_cache_all:
  53. * - used when the number of pages are _high_.
  54. * - time taken in the range of 500-900 us.
  55. * - has a higher penalty but, as whole dcache + icache is invalidated
  56. */
  57. /*
  58. * FIXME: dmac_inv_range crashes randomly on the user space buffer
  59. * address. Fall back to flush_cache_all for now.
  60. */
#define ISP_CACHE_FLUSH_PAGES_MAX	0

/*
 * isp_video_buffer_cache_sync - Invalidate CPU caches for a buffer at QBUF
 * @buf: the video buffer
 *
 * Non-cached and write-combining mappings need no maintenance (skip_cache is
 * set by isp_video_buffer_prepare_vm_flags()). With
 * ISP_CACHE_FLUSH_PAGES_MAX defined to 0, the dmac_map_area() branch is
 * effectively dead and flush_cache_all() is always used -- see the FIXME
 * above about dmac_inv_range crashing on userspace addresses.
 */
static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	if (buf->skip_cache)
		return;

	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		/* Clean/invalidate the inner cache for the userspace range,
		 * then invalidate the outer (L2) cache.
		 */
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}
  76. /*
  77. * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
  78. *
  79. * Lock the VMAs underlying the given buffer into memory. This avoids the
  80. * userspace buffer mapping from being swapped out, making VIPT cache handling
  81. * easier.
  82. *
  83. * Note that the pages will not be freed as the buffers have been locked to
  84. * memory using by a call to get_user_pages(), but the userspace mapping could
  85. * still disappear if the VMAs are not locked. This is caused by the memory
  86. * management code trying to be as lock-less as possible, which results in the
  87. * userspace mapping manager not finding out that the pages are locked under
  88. * some conditions.
  89. */
/*
 * isp_video_buffer_lock_vma - Set or clear VM_LOCKED on the buffer's VMAs
 * @buf: the video buffer
 * @lock: non-zero to lock the VMAs, zero to unlock them
 *
 * Return 0 on success or -EFAULT if part of the range is not covered by any
 * VMA (or -EINVAL when asked to lock without a current mm context).
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
	struct vm_area_struct *vma;
	unsigned long start;
	unsigned long end;
	int ret = 0;

	/* MMAP buffers live in kernel memory; there is no userspace VMA. */
	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
		return 0;

	/* We can be called from workqueue context if the current task dies to
	 * unlock the VMAs. In that case there's no current memory management
	 * context so unlocking can't be performed, but the VMAs have been or
	 * are getting destroyed anyway so it doesn't really matter.
	 */
	if (!current || !current->mm)
		return lock ? -EINVAL : 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_write(&current->mm->mmap_sem);
	spin_lock(&current->mm->page_table_lock);

	/* Walk every VMA covering [start, end] and toggle VM_LOCKED. */
	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL) {
			/* Part of the range is unmapped. Note that flags
			 * already changed on earlier VMAs are not rolled back.
			 */
			ret = -EFAULT;
			goto out;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		/* NOTE(review): vm_end is exclusive, so "+ 1" skips one byte;
		 * find_vma() still returns the following VMA, but plain
		 * "vma->vm_end" would be the conventional next start --
		 * confirm this is intentional.
		 */
		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Mirror the final state in the buffer so cleanup knows whether the
	 * VMAs are currently locked.
	 */
	if (lock)
		buf->vm_flags |= VM_LOCKED;
	else
		buf->vm_flags &= ~VM_LOCKED;

out:
	spin_unlock(&current->mm->page_table_lock);
	up_write(&current->mm->mmap_sem);
	return ret;
}
  130. /*
  131. * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
  132. *
  133. * Iterate over the vmalloc'ed area and create a scatter list entry for every
  134. * page.
  135. */
/*
 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed
 * buffer
 * @buf: the video buffer (buf->vaddr and buf->vbuf.length must be set)
 *
 * Creates one scatterlist entry per page and stores the list in buf->sglist
 * with the entry count in buf->sglen.
 *
 * Return 0 on success, -ENOMEM on allocation failure, or -EINVAL if a page
 * cannot be resolved or lies in highmem.
 */
static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int npages;
	unsigned int i;
	void *addr;

	addr = buf->vaddr;
	/* One entry per page of the page-aligned buffer length. */
	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;

	sglist = vmalloc(npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, npages);

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		/* Highmem pages are rejected; the buffer was allocated with
		 * vmalloc_32_user() so this is not expected to trigger.
		 */
		if (page == NULL || PageHighMem(page)) {
			vfree(sglist);
			return -EINVAL;
		}

		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
	}

	buf->sglen = npages;
	buf->sglist = sglist;

	return 0;
}
  160. /*
  161. * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
  162. *
  163. * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
  164. */
/*
 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
 * @buf: the video buffer (buf->pages, buf->npages and buf->offset must have
 *       been filled in by isp_video_buffer_prepare_user())
 *
 * Maps the pinned pages 1:1 to scatterlist entries. Only the first entry
 * carries a non-zero offset (the buffer's start inside its first page); the
 * first entry's length is shortened accordingly.
 *
 * Return 0 on success, -ENOMEM on allocation failure, or -EINVAL if any page
 * lies in highmem.
 */
static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i) {
		if (PageHighMem(buf->pages[i])) {
			vfree(sglist);
			return -EINVAL;
		}

		sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
			    offset);
		/* Only the first page starts mid-page. */
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}
  187. /*
  188. * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
  189. *
  190. * Create a scatter list of physically contiguous pages starting at the buffer
  191. * memory physical address.
  192. */
/*
 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP
 * buffer
 * @buf: the video buffer (buf->paddr, buf->npages and buf->offset must have
 *       been validated by isp_video_buffer_prepare_pfnmap())
 *
 * The pages are physically contiguous starting at buf->paddr, so DMA
 * addresses are computed directly instead of going through dma_map_sg().
 *
 * Return 0 on success or -ENOMEM on allocation failure.
 */
static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned long pfn = buf->paddr >> PAGE_SHIFT;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i, ++pfn) {
		sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
			    offset);
		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
		 * manually.
		 */
		sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
		/* Only the first page starts mid-page. */
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}
  216. /*
  217. * isp_video_buffer_cleanup - Release pages for a userspace VMA.
  218. *
  219. * Release pages locked by a call isp_video_buffer_prepare_user and free the
  220. * pages table.
  221. */
/*
 * isp_video_buffer_cleanup - Release all per-buffer resources
 * @buf: the video buffer
 *
 * Undo everything isp_video_buffer_prepare() did: driver-specific cleanup,
 * DMA unmapping, scatterlist, pinned user pages and VMA locks. Safe to call
 * on a partially prepared buffer (all fields are reset to a clean state).
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	unsigned int i;

	/* Let the driver release its own resources first. */
	if (buf->queue->ops->buffer_cleanup)
		buf->queue->ops->buffer_cleanup(buf);

	/* PFNMAP buffers were never mapped through dma_map_sg() (their DMA
	 * addresses are set manually in isp_video_buffer_sglist_pfnmap()),
	 * so only unmap the others.
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
			     direction);
	}

	vfree(buf->sglist);
	buf->sglist = NULL;
	buf->sglen = 0;

	/* Unlock the VMAs and release the pages pinned by get_user_pages(). */
	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}
  247. /*
  248. * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
  249. *
  250. * This function creates a list of pages for a userspace VMA. The number of
  251. * pages is first computed based on the buffer size, and pages are then
  252. * retrieved by a call to get_user_pages.
  253. *
  254. * Pages are pinned to memory by get_user_pages, making them available for DMA
  255. * transfers. However, due to memory management optimization, it seems the
  256. * get_user_pages doesn't guarantee that the pinned pages will not be written
  257. * to swap and removed from the userspace mapping(s). When this happens, a page
  258. * fault can be generated when accessing those unmapped pages.
  259. *
  260. * If the fault is triggered by a page table walk caused by VIPT cache
  261. * management operations, the page fault handler might oops if the MM semaphore
  262. * is held, as it can't handle kernel page faults in that case. To fix that, a
  263. * fixup entry needs to be added to the cache management code, or the userspace
  264. * VMA must be locked to avoid removing pages from the userspace mapping in the
  265. * first place.
  266. *
  267. * If the number of pages retrieved is smaller than the number required by the
  268. * buffer size, the function returns -EFAULT.
  269. */
/*
 * isp_video_buffer_prepare_user - Pin userspace pages and lock their VMAs
 * @buf: the video buffer (buf->vbuf.m.userptr and buf->vbuf.length set)
 *
 * Fills in buf->offset, buf->npages and buf->pages. On any failure the
 * buffer is cleaned up and an error is returned (-ENOMEM, -EFAULT, or the
 * VMA locking error).
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
	unsigned long data;
	unsigned int first;
	unsigned int last;
	int ret;

	data = buf->vbuf.m.userptr;
	/* First and last page frame numbers spanned by the buffer. */
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;

	/* Byte offset of the buffer start inside its first page. */
	buf->offset = data & ~PAGE_MASK;
	buf->npages = last - first + 1;
	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
	if (buf->pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	/* Write access is requested for capture buffers, as the hardware
	 * writes into them.
	 */
	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
			     buf->npages,
			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
			     buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != buf->npages) {
		/* Partial pinning: record how many pages were actually
		 * obtained so that cleanup releases exactly those.
		 */
		buf->npages = ret < 0 ? 0 : ret;
		isp_video_buffer_cleanup(buf);
		return -EFAULT;
	}

	ret = isp_video_buffer_lock_vma(buf, 1);
	if (ret < 0)
		isp_video_buffer_cleanup(buf);

	return ret;
}
  300. /*
  301. * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
  302. *
  303. * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
  304. * memory and if they span a single VMA.
  305. *
  306. * Return 0 if the buffer is valid, or -EFAULT otherwise.
  307. */
/*
 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 * @buf: the video buffer
 *
 * Walks the buffer one page at a time, verifying that the PFNs are
 * consecutive (i.e. the buffer is physically contiguous) and that a single
 * VMA covers the whole range. On success buf->offset, buf->npages and
 * buf->paddr are filled in and 0 is returned; -EFAULT otherwise.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	unsigned long prev_pfn;
	unsigned long this_pfn;
	unsigned long start;
	unsigned long end;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	buf->offset = start & ~PAGE_MASK;
	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	/* No page array: PFNMAP pages are not pinned individually. */
	buf->pages = NULL;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	/* A single VMA must span the entire buffer. */
	if (vma == NULL || vma->vm_end < end)
		goto done;

	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		/* First iteration: record the physical base address.
		 * NOTE(review): prev_pfn == 0 doubles as the "first page"
		 * marker, which assumes PFN 0 never maps a real page here --
		 * confirm.
		 */
		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			/* Non-contiguous physical memory is not supported. */
			ret = -EFAULT;
			goto done;
		}

		prev_pfn = this_pfn;
	}

	buf->paddr = pa + buf->offset;
	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}
  344. /*
  345. * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
  346. *
  347. * This function locates the VMAs for the buffer's userspace address and checks
  348. * that their flags match. The only flag that we need to care for at the moment
  349. * is VM_PFNMAP.
  350. *
  351. * The buffer vm_flags field is set to the first VMA flags.
  352. *
  353. * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
  354. * have incompatible flags.
  355. */
/*
 * isp_video_buffer_prepare_vm_flags - Record and validate VMA flags
 * @buf: the video buffer
 *
 * Walks all VMAs covering the buffer, records the first VMA's flags in
 * buf->vm_flags, and fails with -EFAULT if any VMA is missing, disagrees on
 * VM_PFNMAP, or has different page protection. Also sets buf->skip_cache for
 * non-cached and write-combining mappings so cache maintenance can be
 * skipped at QBUF time.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	pgprot_t uninitialized_var(vm_page_prot);
	unsigned long start;
	unsigned long end;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_read(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL)
			goto done;

		/* First VMA: remember its flags and protection as reference. */
		if (start == buf->vbuf.m.userptr) {
			buf->vm_flags = vma->vm_flags;
			vm_page_prot = vma->vm_page_prot;
		}

		/* All VMAs must agree on the VM_PFNMAP flag... */
		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
			goto done;

		/* ...and on the page protection bits.
		 * NOTE(review): direct pgprot_t comparison with != only
		 * compiles when pgprot_t is a scalar type (i.e. without
		 * STRICT_MM_TYPECHECKS) -- confirm for the target config.
		 */
		if (vm_page_prot != vma->vm_page_prot)
			goto done;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Skip cache management to enhance performances for non-cached or
	 * write-combining buffers.
	 */
	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
	    vm_page_prot == pgprot_writecombine(vm_page_prot))
		buf->skip_cache = true;

	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}
  391. /*
  392. * isp_video_buffer_prepare - Make a buffer ready for operation
  393. *
  394. * Preparing a buffer involves:
  395. *
  396. * - validating VMAs (userspace buffers only)
  397. * - locking pages and VMAs into memory (userspace buffers only)
  398. * - building page and scatter-gather lists
  399. * - mapping buffers for DMA operation
  400. * - performing driver-specific preparation
  401. *
  402. * The function must be called in userspace context with a valid mm context
  403. * (this excludes cleanup paths such as sys_close when the userspace process
  404. * segfaults).
  405. */
/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 * @buf: the video buffer
 *
 * Dispatches on the memory type: MMAP buffers get a scatterlist built from
 * their vmalloc'ed memory; USERPTR buffers are first validated, then handled
 * either as PFNMAP (physically contiguous) or as regular pinned user pages.
 * Non-PFNMAP buffers are then DMA-mapped, and the driver's buffer_prepare
 * hook is called last. Any failure triggers a full cleanup.
 *
 * Return 0 on success or a negative error code (-EINVAL for unsupported
 * memory types, -ENOMEM/-EFAULT from the helpers or DMA mapping).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_sglist_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		/* Early returns here skip cleanup: these helpers leave no
		 * state behind on failure (or clean up after themselves).
		 */
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP) {
			ret = isp_video_buffer_prepare_pfnmap(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_pfnmap(buf);
		} else {
			ret = isp_video_buffer_prepare_user(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_user(buf);
		}
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	/* PFNMAP buffers carry manually computed DMA addresses; all others
	 * must go through the DMA API.
	 */
	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
				 direction);
		/* Partial mapping is treated as failure. */
		if (ret != buf->sglen) {
			ret = -EFAULT;
			goto done;
		}
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}
  454. /*
  455. * isp_video_queue_query - Query the status of a given buffer
  456. *
  457. * Locking: must be called with the queue lock held.
  458. */
  459. static void isp_video_buffer_query(struct isp_video_buffer *buf,
  460. struct v4l2_buffer *vbuf)
  461. {
  462. memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
  463. if (buf->vma_use_count)
  464. vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
  465. switch (buf->state) {
  466. case ISP_BUF_STATE_ERROR:
  467. vbuf->flags |= V4L2_BUF_FLAG_ERROR;
  468. case ISP_BUF_STATE_DONE:
  469. vbuf->flags |= V4L2_BUF_FLAG_DONE;
  470. case ISP_BUF_STATE_QUEUED:
  471. case ISP_BUF_STATE_ACTIVE:
  472. vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
  473. break;
  474. case ISP_BUF_STATE_IDLE:
  475. default:
  476. break;
  477. }
  478. }
  479. /*
  480. * isp_video_buffer_wait - Wait for a buffer to be ready
  481. *
  482. * In non-blocking mode, return immediately with 0 if the buffer is ready or
  483. * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
  484. *
  485. * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
  486. * queue using the same condition.
  487. */
/*
 * isp_video_buffer_wait - Wait for a buffer to leave the QUEUED/ACTIVE states
 * @buf: the video buffer
 * @nonblocking: if non-zero, return -EAGAIN instead of sleeping
 *
 * Return 0 when the buffer is ready, -EAGAIN in non-blocking mode when it is
 * not, or -ERESTARTSYS if the interruptible wait is cut short by a signal.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != ISP_BUF_STATE_QUEUED &&
			buf->state != ISP_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != ISP_BUF_STATE_QUEUED &&
		buf->state != ISP_BUF_STATE_ACTIVE);
}
  499. /* -----------------------------------------------------------------------------
  500. * Queue management
  501. */
  502. /*
  503. * isp_video_queue_free - Free video buffers memory
  504. *
  505. * Buffers can only be freed if the queue isn't streaming and if no buffer is
  506. * mapped to userspace. Return -EBUSY if those conditions aren't statisfied.
  507. *
  508. * This function must be called with the queue lock held.
  509. */
/*
 * isp_video_queue_free - Free all video buffers of a queue
 * @queue: the video buffers queue
 *
 * Refuses (-EBUSY) while streaming or while any buffer is still mapped to
 * userspace; otherwise releases every buffer's resources and memory and
 * resets the queue to the empty state.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_free(struct isp_video_queue *queue)
{
	unsigned int i;

	if (queue->streaming)
		return -EBUSY;

	/* Check all mappings first so we free either everything or nothing. */
	for (i = 0; i < queue->count; ++i) {
		if (queue->buffers[i]->vma_use_count != 0)
			return -EBUSY;
	}

	for (i = 0; i < queue->count; ++i) {
		struct isp_video_buffer *buf = queue->buffers[i];

		isp_video_buffer_cleanup(buf);

		/* vaddr is only set for MMAP buffers; vfree(NULL) is a no-op. */
		vfree(buf->vaddr);
		buf->vaddr = NULL;

		kfree(buf);
		queue->buffers[i] = NULL;
	}

	INIT_LIST_HEAD(&queue->queue);
	queue->count = 0;
	return 0;
}
  531. /*
  532. * isp_video_queue_alloc - Allocate video buffers memory
  533. *
  534. * This function must be called with the queue lock held.
  535. */
/*
 * isp_video_queue_alloc - Allocate video buffer objects (and MMAP memory)
 * @queue: the video buffers queue
 * @nbuffers: requested number of buffers
 * @size: size of each buffer in bytes
 * @memory: V4L2 memory type (MMAP buffers also get backing memory)
 *
 * Frees any existing buffers first. On partial allocation failure the
 * buffers allocated so far are kept. Returns the number of buffers actually
 * allocated, -ENOMEM if none could be allocated, or the isp_video_queue_free()
 * error.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start by freeing the buffers. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		return 0;

	/* Initialize the allocated buffers. */
	for (i = 0; i < nbuffers; ++i) {
		/* bufsize covers the driver-specific buffer structure which
		 * embeds a struct isp_video_buffer.
		 */
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* Allocate video buffers memory for mmap mode. Align
			 * the size to the page size.
			 */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			/* The mmap offset identifies the buffer: buffer i
			 * occupies [i * aligned_size, (i + 1) * aligned_size).
			 */
			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return nbuffers;
}
  583. /**
  584. * omap3isp_video_queue_cleanup - Clean up the video buffers queue
  585. * @queue: Video buffers queue
  586. *
  587. * Free all allocated resources and clean up the video buffers queue. The queue
  588. * must not be busy (no ongoing video stream) and buffers must have been
  589. * unmapped.
  590. *
  591. * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
  592. * unmapped.
  593. */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	/* Thin wrapper around isp_video_queue_free(), which performs the
	 * -EBUSY checks. NOTE(review): isp_video_queue_free() documents that
	 * the queue lock must be held, but it is called here without taking
	 * it -- confirm callers guarantee exclusive access at cleanup time.
	 */
	return isp_video_queue_free(queue);
}
  598. /**
  599. * omap3isp_video_queue_init - Initialize the video buffers queue
  600. * @queue: Video buffers queue
  601. * @type: V4L2 buffer type (capture or output)
  602. * @ops: Driver-specific queue operations
  603. * @dev: Device used for DMA operations
  604. * @bufsize: Size of the driver-specific buffer structure
  605. *
  606. * Initialize the video buffers queue with the supplied parameters.
  607. *
  608. * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
  609. * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
  610. *
  611. * Buffer objects will be allocated using the given buffer size to allow room
  612. * for driver-specific fields. Driver-specific buffer structures must start
  613. * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
  614. * structure must pass the size of the isp_video_buffer structure in the bufsize
  615. * parameter.
  616. *
  617. * Return 0 on success.
  618. */
  619. int omap3isp_video_queue_init(struct isp_video_queue *queue,
  620. enum v4l2_buf_type type,
  621. const struct isp_video_queue_operations *ops,
  622. struct device *dev, unsigned int bufsize)
  623. {
  624. INIT_LIST_HEAD(&queue->queue);
  625. mutex_init(&queue->lock);
  626. spin_lock_init(&queue->irqlock);
  627. queue->type = type;
  628. queue->ops = ops;
  629. queue->dev = dev;
  630. queue->bufsize = bufsize;
  631. return 0;
  632. }
  633. /* -----------------------------------------------------------------------------
  634. * V4L2 operations
  635. */
  636. /**
  637. * omap3isp_video_queue_reqbufs - Allocate video buffers memory
  638. *
  639. * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
  640. * allocated video buffer objects and, for MMAP buffers, buffer memory.
  641. *
  642. * If the number of buffers is 0, all buffers are freed and the function returns
  643. * without performing any allocation.
  644. *
  645. * If the number of buffers is not 0, currently allocated buffers (if any) are
  646. * freed and the requested number of buffers are allocated. Depending on
  647. * driver-specific requirements and on memory availability, a number of buffer
  648. * smaller or bigger than requested can be allocated. This isn't considered as
  649. * an error.
  650. *
  651. * Return 0 on success or one of the following error codes:
  652. *
  653. * -EINVAL if the buffer type or index are invalid
  654. * -EBUSY if the queue is busy (streaming or buffers mapped)
  655. * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
  656. */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	/* Let the driver adjust the buffer count and set the buffer size. */
	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	/* Clamp to the number of buffer slots available in the queue. */
	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	/* Report back how many buffers were actually allocated. */
	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
  679. /**
  680. * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
  681. *
  682. * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
  683. * returns the status of a given video buffer.
  684. *
  685. * Return 0 on success or -EINVAL if the buffer type or index are invalid.
  686. */
  687. int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
  688. struct v4l2_buffer *vbuf)
  689. {
  690. struct isp_video_buffer *buf;
  691. int ret = 0;
  692. if (vbuf->type != queue->type)
  693. return -EINVAL;
  694. mutex_lock(&queue->lock);
  695. if (vbuf->index >= queue->count) {
  696. ret = -EINVAL;
  697. goto done;
  698. }
  699. buf = queue->buffers[vbuf->index];
  700. isp_video_buffer_query(buf, vbuf);
  701. done:
  702. mutex_unlock(&queue->lock);
  703. return ret;
  704. }
  705. /**
  706. * omap3isp_video_queue_qbuf - Queue a buffer
  707. *
  708. * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
  709. *
  710. * The v4l2_buffer structure passed from userspace is first sanity tested. If
  711. * sane, the buffer is then processed and added to the main queue and, if the
  712. * queue is streaming, to the IRQ queue.
  713. *
  714. * Before being enqueued, USERPTR buffers are checked for address changes. If
  715. * the buffer has a different userspace address, the old memory area is unlocked
  716. * and the new memory area is locked.
  717. */
  718. int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
  719. struct v4l2_buffer *vbuf)
  720. {
  721. struct isp_video_buffer *buf;
  722. unsigned long flags;
  723. int ret = -EINVAL;
  724. if (vbuf->type != queue->type)
  725. goto done;
  726. mutex_lock(&queue->lock);
  727. if (vbuf->index >= queue->count)
  728. goto done;
  729. buf = queue->buffers[vbuf->index];
  730. if (vbuf->memory != buf->vbuf.memory)
  731. goto done;
  732. if (buf->state != ISP_BUF_STATE_IDLE)
  733. goto done;
  734. if (vbuf->memory == V4L2_MEMORY_USERPTR &&
  735. vbuf->length < buf->vbuf.length)
  736. goto done;
  737. if (vbuf->memory == V4L2_MEMORY_USERPTR &&
  738. vbuf->m.userptr != buf->vbuf.m.userptr) {
  739. isp_video_buffer_cleanup(buf);
  740. buf->vbuf.m.userptr = vbuf->m.userptr;
  741. buf->prepared = 0;
  742. }
  743. if (!buf->prepared) {
  744. ret = isp_video_buffer_prepare(buf);
  745. if (ret < 0)
  746. goto done;
  747. buf->prepared = 1;
  748. }
  749. isp_video_buffer_cache_sync(buf);
  750. buf->state = ISP_BUF_STATE_QUEUED;
  751. list_add_tail(&buf->stream, &queue->queue);
  752. if (queue->streaming) {
  753. spin_lock_irqsave(&queue->irqlock, flags);
  754. queue->ops->buffer_queue(buf);
  755. spin_unlock_irqrestore(&queue->irqlock, flags);
  756. }
  757. ret = 0;
  758. done:
  759. mutex_unlock(&queue->lock);
  760. return ret;
  761. }
  762. /**
  763. * omap3isp_video_queue_dqbuf - Dequeue a buffer
  764. *
  765. * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
  766. *
  767. * Wait until a buffer is ready to be dequeued, remove it from the queue and
  768. * copy its information to the v4l2_buffer structure.
  769. *
  770. * If the nonblocking argument is not zero and no buffer is ready, return
  771. * -EAGAIN immediately instead of waiting.
  772. *
  773. * If no buffer has been enqueued, or if the requested buffer type doesn't match
  774. * the queue type, return -EINVAL.
  775. */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	/* Nothing has been queued: there is nothing to dequeue. */
	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Buffers complete in FIFO order: wait on the head of the queue. */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	/* Capture the DONE/ERROR flags before resetting the state to IDLE. */
	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
  800. /**
  801. * omap3isp_video_queue_streamon - Start streaming
  802. *
  803. * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
  804. * starts streaming on the queue and calls the buffer_queue operation for all
  805. * queued buffers.
  806. *
  807. * Return 0 on success.
  808. */
  809. int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
  810. {
  811. struct isp_video_buffer *buf;
  812. unsigned long flags;
  813. mutex_lock(&queue->lock);
  814. if (queue->streaming)
  815. goto done;
  816. queue->streaming = 1;
  817. spin_lock_irqsave(&queue->irqlock, flags);
  818. list_for_each_entry(buf, &queue->queue, stream)
  819. queue->ops->buffer_queue(buf);
  820. spin_unlock_irqrestore(&queue->irqlock, flags);
  821. done:
  822. mutex_unlock(&queue->lock);
  823. return 0;
  824. }
  825. /**
  826. * omap3isp_video_queue_streamoff - Stop streaming
  827. *
  828. * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
  829. * stops streaming on the queue and wakes up all the buffers.
  830. *
  831. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  832. * delayed works before calling this function to make sure no buffer will be
  833. * touched by the driver and/or hardware.
  834. */
  835. void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
  836. {
  837. struct isp_video_buffer *buf;
  838. unsigned long flags;
  839. unsigned int i;
  840. mutex_lock(&queue->lock);
  841. if (!queue->streaming)
  842. goto done;
  843. queue->streaming = 0;
  844. spin_lock_irqsave(&queue->irqlock, flags);
  845. for (i = 0; i < queue->count; ++i) {
  846. buf = queue->buffers[i];
  847. if (buf->state == ISP_BUF_STATE_ACTIVE)
  848. wake_up(&buf->wait);
  849. buf->state = ISP_BUF_STATE_IDLE;
  850. }
  851. spin_unlock_irqrestore(&queue->irqlock, flags);
  852. INIT_LIST_HEAD(&queue->queue);
  853. done:
  854. mutex_unlock(&queue->lock);
  855. }
  856. /**
  857. * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
  858. *
  859. * This function is intended to be used with suspend/resume operations. It
  860. * discards all 'done' buffers as they would be too old to be requested after
  861. * resume.
  862. *
  863. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  864. * delayed works before calling this function to make sure no buffer will be
  865. * touched by the driver and/or hardware.
  866. */
  867. void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
  868. {
  869. struct isp_video_buffer *buf;
  870. unsigned int i;
  871. mutex_lock(&queue->lock);
  872. if (!queue->streaming)
  873. goto done;
  874. for (i = 0; i < queue->count; ++i) {
  875. buf = queue->buffers[i];
  876. if (buf->state == ISP_BUF_STATE_DONE)
  877. buf->state = ISP_BUF_STATE_ERROR;
  878. }
  879. done:
  880. mutex_unlock(&queue->lock);
  881. }
  882. static void isp_video_queue_vm_open(struct vm_area_struct *vma)
  883. {
  884. struct isp_video_buffer *buf = vma->vm_private_data;
  885. buf->vma_use_count++;
  886. }
  887. static void isp_video_queue_vm_close(struct vm_area_struct *vma)
  888. {
  889. struct isp_video_buffer *buf = vma->vm_private_data;
  890. buf->vma_use_count--;
  891. }
  892. static const struct vm_operations_struct isp_video_queue_vm_ops = {
  893. .open = isp_video_queue_vm_open,
  894. .close = isp_video_queue_vm_close,
  895. };
  896. /**
  897. * omap3isp_video_queue_mmap - Map buffers to userspace
  898. *
  899. * This function is intended to be used as an mmap() file operation handler. It
  900. * maps a buffer to userspace based on the VMA offset.
  901. *
  902. * Only buffers of memory type MMAP are supported.
  903. */
  904. int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
  905. struct vm_area_struct *vma)
  906. {
  907. struct isp_video_buffer *uninitialized_var(buf);
  908. unsigned long size;
  909. unsigned int i;
  910. int ret = 0;
  911. mutex_lock(&queue->lock);
  912. for (i = 0; i < queue->count; ++i) {
  913. buf = queue->buffers[i];
  914. if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
  915. break;
  916. }
  917. if (i == queue->count) {
  918. ret = -EINVAL;
  919. goto done;
  920. }
  921. size = vma->vm_end - vma->vm_start;
  922. if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
  923. size != PAGE_ALIGN(buf->vbuf.length)) {
  924. ret = -EINVAL;
  925. goto done;
  926. }
  927. ret = remap_vmalloc_range(vma, buf->vaddr, 0);
  928. if (ret < 0)
  929. goto done;
  930. vma->vm_ops = &isp_video_queue_vm_ops;
  931. vma->vm_private_data = buf;
  932. isp_video_queue_vm_open(vma);
  933. done:
  934. mutex_unlock(&queue->lock);
  935. return ret;
  936. }
  937. /**
  938. * omap3isp_video_queue_poll - Poll video queue state
  939. *
  940. * This function is intended to be used as a poll() file operation handler. It
  941. * polls the state of the video buffer at the front of the queue and returns an
  942. * events mask.
  943. *
  944. * If no buffer is present at the front of the queue, POLLERR is returned.
  945. */
  946. unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
  947. struct file *file, poll_table *wait)
  948. {
  949. struct isp_video_buffer *buf;
  950. unsigned int mask = 0;
  951. mutex_lock(&queue->lock);
  952. if (list_empty(&queue->queue)) {
  953. mask |= POLLERR;
  954. goto done;
  955. }
  956. buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
  957. poll_wait(file, &buf->wait, wait);
  958. if (buf->state == ISP_BUF_STATE_DONE ||
  959. buf->state == ISP_BUF_STATE_ERROR) {
  960. if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
  961. mask |= POLLIN | POLLRDNORM;
  962. else
  963. mask |= POLLOUT | POLLWRNORM;
  964. }
  965. done:
  966. mutex_unlock(&queue->lock);
  967. return mask;
  968. }