/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
  12. #include <linux/module.h>
  13. #include <linux/mm.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/sched.h>
  16. #include <linux/slab.h>
  17. #include <linux/vmalloc.h>
  18. #include <media/videobuf2-core.h>
  19. #include <media/videobuf2-memops.h>
  20. #include <media/videobuf2-dma-sg.h>
  21. static int debug;
  22. module_param(debug, int, 0644);
  23. #define dprintk(level, fmt, arg...) \
  24. do { \
  25. if (debug >= level) \
  26. printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
  27. } while (0)
  28. struct vb2_dma_sg_buf {
  29. void *vaddr;
  30. struct page **pages;
  31. int write;
  32. int offset;
  33. struct sg_table sg_table;
  34. size_t size;
  35. unsigned int num_pages;
  36. atomic_t refcount;
  37. struct vb2_vmarea_handler handler;
  38. };
  39. static void vb2_dma_sg_put(void *buf_priv);
  40. static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
  41. gfp_t gfp_flags)
  42. {
  43. unsigned int last_page = 0;
  44. int size = buf->size;
  45. while (size > 0) {
  46. struct page *pages;
  47. int order;
  48. int i;
  49. order = get_order(size);
  50. /* Dont over allocate*/
  51. if ((PAGE_SIZE << order) > size)
  52. order--;
  53. pages = NULL;
  54. while (!pages) {
  55. pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
  56. __GFP_NOWARN | gfp_flags, order);
  57. if (pages)
  58. break;
  59. if (order == 0) {
  60. while (last_page--)
  61. __free_page(buf->pages[last_page]);
  62. return -ENOMEM;
  63. }
  64. order--;
  65. }
  66. split_page(pages, order);
  67. for (i = 0; i < (1 << order); i++)
  68. buf->pages[last_page++] = &pages[i];
  69. size -= PAGE_SIZE << order;
  70. }
  71. return 0;
  72. }
  73. static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
  74. {
  75. struct vb2_dma_sg_buf *buf;
  76. int ret;
  77. int num_pages;
  78. buf = kzalloc(sizeof *buf, GFP_KERNEL);
  79. if (!buf)
  80. return NULL;
  81. buf->vaddr = NULL;
  82. buf->write = 0;
  83. buf->offset = 0;
  84. buf->size = size;
  85. /* size is already page aligned */
  86. buf->num_pages = size >> PAGE_SHIFT;
  87. buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
  88. GFP_KERNEL);
  89. if (!buf->pages)
  90. goto fail_pages_array_alloc;
  91. ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
  92. if (ret)
  93. goto fail_pages_alloc;
  94. ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
  95. buf->num_pages, 0, size, gfp_flags);
  96. if (ret)
  97. goto fail_table_alloc;
  98. buf->handler.refcount = &buf->refcount;
  99. buf->handler.put = vb2_dma_sg_put;
  100. buf->handler.arg = buf;
  101. atomic_inc(&buf->refcount);
  102. dprintk(1, "%s: Allocated buffer of %d pages\n",
  103. __func__, buf->num_pages);
  104. return buf;
  105. fail_table_alloc:
  106. num_pages = buf->num_pages;
  107. while (num_pages--)
  108. __free_page(buf->pages[num_pages]);
  109. fail_pages_alloc:
  110. kfree(buf->pages);
  111. fail_pages_array_alloc:
  112. kfree(buf);
  113. return NULL;
  114. }
  115. static void vb2_dma_sg_put(void *buf_priv)
  116. {
  117. struct vb2_dma_sg_buf *buf = buf_priv;
  118. int i = buf->num_pages;
  119. if (atomic_dec_and_test(&buf->refcount)) {
  120. dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
  121. buf->num_pages);
  122. if (buf->vaddr)
  123. vm_unmap_ram(buf->vaddr, buf->num_pages);
  124. sg_free_table(&buf->sg_table);
  125. while (--i >= 0)
  126. __free_page(buf->pages[i]);
  127. kfree(buf->pages);
  128. kfree(buf);
  129. }
  130. }
  131. static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
  132. unsigned long size, int write)
  133. {
  134. struct vb2_dma_sg_buf *buf;
  135. unsigned long first, last;
  136. int num_pages_from_user;
  137. buf = kzalloc(sizeof *buf, GFP_KERNEL);
  138. if (!buf)
  139. return NULL;
  140. buf->vaddr = NULL;
  141. buf->write = write;
  142. buf->offset = vaddr & ~PAGE_MASK;
  143. buf->size = size;
  144. first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
  145. last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
  146. buf->num_pages = last - first + 1;
  147. buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
  148. GFP_KERNEL);
  149. if (!buf->pages)
  150. return NULL;
  151. num_pages_from_user = get_user_pages(current, current->mm,
  152. vaddr & PAGE_MASK,
  153. buf->num_pages,
  154. write,
  155. 1, /* force */
  156. buf->pages,
  157. NULL);
  158. if (num_pages_from_user != buf->num_pages)
  159. goto userptr_fail_get_user_pages;
  160. if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
  161. buf->num_pages, buf->offset, size, 0))
  162. goto userptr_fail_alloc_table_from_pages;
  163. return buf;
  164. userptr_fail_alloc_table_from_pages:
  165. userptr_fail_get_user_pages:
  166. dprintk(1, "get_user_pages requested/got: %d/%d]\n",
  167. num_pages_from_user, buf->num_pages);
  168. while (--num_pages_from_user >= 0)
  169. put_page(buf->pages[num_pages_from_user]);
  170. kfree(buf->pages);
  171. kfree(buf);
  172. return NULL;
  173. }
  174. /*
  175. * @put_userptr: inform the allocator that a USERPTR buffer will no longer
  176. * be used
  177. */
  178. static void vb2_dma_sg_put_userptr(void *buf_priv)
  179. {
  180. struct vb2_dma_sg_buf *buf = buf_priv;
  181. int i = buf->num_pages;
  182. dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
  183. __func__, buf->num_pages);
  184. if (buf->vaddr)
  185. vm_unmap_ram(buf->vaddr, buf->num_pages);
  186. sg_free_table(&buf->sg_table);
  187. while (--i >= 0) {
  188. if (buf->write)
  189. set_page_dirty_lock(buf->pages[i]);
  190. put_page(buf->pages[i]);
  191. }
  192. kfree(buf->pages);
  193. kfree(buf);
  194. }
  195. static void *vb2_dma_sg_vaddr(void *buf_priv)
  196. {
  197. struct vb2_dma_sg_buf *buf = buf_priv;
  198. BUG_ON(!buf);
  199. if (!buf->vaddr)
  200. buf->vaddr = vm_map_ram(buf->pages,
  201. buf->num_pages,
  202. -1,
  203. PAGE_KERNEL);
  204. /* add offset in case userptr is not page-aligned */
  205. return buf->vaddr + buf->offset;
  206. }
  207. static unsigned int vb2_dma_sg_num_users(void *buf_priv)
  208. {
  209. struct vb2_dma_sg_buf *buf = buf_priv;
  210. return atomic_read(&buf->refcount);
  211. }
  212. static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
  213. {
  214. struct vb2_dma_sg_buf *buf = buf_priv;
  215. unsigned long uaddr = vma->vm_start;
  216. unsigned long usize = vma->vm_end - vma->vm_start;
  217. int i = 0;
  218. if (!buf) {
  219. printk(KERN_ERR "No memory to map\n");
  220. return -EINVAL;
  221. }
  222. do {
  223. int ret;
  224. ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
  225. if (ret) {
  226. printk(KERN_ERR "Remapping memory, error: %d\n", ret);
  227. return ret;
  228. }
  229. uaddr += PAGE_SIZE;
  230. usize -= PAGE_SIZE;
  231. } while (usize > 0);
  232. /*
  233. * Use common vm_area operations to track buffer refcount.
  234. */
  235. vma->vm_private_data = &buf->handler;
  236. vma->vm_ops = &vb2_common_vm_ops;
  237. vma->vm_ops->open(vma);
  238. return 0;
  239. }
  240. static void *vb2_dma_sg_cookie(void *buf_priv)
  241. {
  242. struct vb2_dma_sg_buf *buf = buf_priv;
  243. return &buf->sg_table;
  244. }
  245. const struct vb2_mem_ops vb2_dma_sg_memops = {
  246. .alloc = vb2_dma_sg_alloc,
  247. .put = vb2_dma_sg_put,
  248. .get_userptr = vb2_dma_sg_get_userptr,
  249. .put_userptr = vb2_dma_sg_put_userptr,
  250. .vaddr = vb2_dma_sg_vaddr,
  251. .mmap = vb2_dma_sg_mmap,
  252. .num_users = vb2_dma_sg_num_users,
  253. .cookie = vb2_dma_sg_cookie,
  254. };
  255. EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
  256. MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
  257. MODULE_AUTHOR("Andrzej Pietrasiewicz");
  258. MODULE_LICENSE("GPL");