videobuf2-dma-sg.c

/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
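
/*
 * Per-buffer state: an optional kernel mapping, the array of backing pages,
 * the access mode and start offset, the scatterlist descriptor handed out to
 * drivers via the cookie op, and refcounting glue for userspace mappings.
 */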
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct vb2_dma_sg_desc		sg_desc;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);
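
/*
 * MMAP buffers: allocate the required number of single pages (no physically
 * contiguous memory is needed), describe each page in the scatterlist, take
 * the initial reference and create a kernel mapping with vm_map_ram().
 */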
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
		if (!buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);
	return buf;

fail_pages_alloc:
	while (--i >= 0)
		__free_page(buf->pages[i]);
	kfree(buf->pages);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
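
/*
 * Drop a reference; on the last put, tear down the kernel mapping and free
 * the pages, the scatterlist and the bookkeeping structures.
 */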
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
			buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}
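
/*
 * USERPTR buffers: pin the user's pages with get_user_pages() and build a
 * scatterlist over them, trimming the first entry so that a non-page-aligned
 * start address is handled correctly.
 */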
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vzalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	down_read(&current->mm->mmap_sem);
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);
	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
	       buf->sg_desc.num_pages, num_pages_from_user);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}
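
/*
 * Return a kernel virtual address for the buffer, creating the vm_map_ram()
 * mapping lazily on first use.
 */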
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
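
/*
 * Map the buffer into userspace by inserting the individual pages into the
 * VMA, then install the common vb2 vm_ops so the mapping holds a reference
 * on the buffer.
 */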
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
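
/*
 * The cookie handed to drivers is the scatterlist descriptor, which the
 * driver can map for DMA with dma_map_sg().
 */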
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_desc;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
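
/*
 * Usage sketch (illustrative, not part of this file): a driver opts into this
 * allocator by pointing its vb2_queue at vb2_dma_sg_memops during queue setup
 * and retrieves the scatterlist through the cookie op.  The my_drv_* names
 * below are placeholders; the vb2_queue fields and the vb2_dma_sg_plane_desc()
 * helper (from media/videobuf2-dma-sg.h) follow the API of this kernel
 * generation.
 *
 *	struct vb2_queue *q = &my_drv->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR;
 *	q->drv_priv = my_drv;
 *	q->buf_struct_size = sizeof(struct my_drv_buffer);
 *	q->ops = &my_drv_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 *
 * In its buffer preparation path the driver can then obtain the per-plane
 * scatterlist descriptor and map it for its DMA engine:
 *
 *	struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, 0);
 *	dma_map_sg(dev, desc->sglist, desc->num_pages, DMA_FROM_DEVICE);
 */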