videobuf2-dma-sg.c

/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
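
/*
 * Per-buffer bookkeeping: the page array backing the buffer, an
 * optional kernel mapping (vaddr), the scatter/gather descriptor
 * exported to drivers through the cookie op, and a refcount shared
 * with the mmap vm_area handler.
 */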
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct vb2_dma_sg_desc		sg_desc;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);
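
/*
 * MMAP allocation: build the buffer from individually allocated, zeroed
 * pages, so no physically contiguous memory is required; each page gets
 * its own scatterlist entry. The initial reference taken here is
 * dropped by vb2_dma_sg_put().
 */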
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
	       sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (NULL == buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);
	return buf;

fail_pages_alloc:
	while (--i >= 0)
		__free_page(buf->pages[i]);
	kfree(buf->pages);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}
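
/*
 * Drop one reference; on the last put, tear down the kernel mapping and
 * free the pages, the scatterlist and the bookkeeping structures.
 */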
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
		       buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}
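
/*
 * USERPTR support: pin the user pages spanning [vaddr, vaddr + size)
 * with get_user_pages(). The pointer need not be page-aligned: the
 * offset into the first page is recorded and the first and last
 * scatterlist entries are trimmed to cover exactly the requested size.
 */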
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vmalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;
	memset(buf->sg_desc.sglist, 0,
	       buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	down_read(&current->mm->mmap_sem);
	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);
	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
	       buf->sg_desc.num_pages, num_pages_from_user);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}
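
/*
 * Return a kernel virtual address for the buffer, lazily mapping the
 * pages with vm_map_ram() on first use.
 */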
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}
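
/* Report how many references to the buffer are still outstanding. */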
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
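
/*
 * Map the buffer into a userspace vma one page at a time with
 * vm_insert_page(); vb2_common_vm_ops then ties the vma lifetime to
 * buf->refcount via buf->handler.
 */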
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
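
/*
 * The cookie handed back to drivers is the vb2_dma_sg_desc, from which
 * they can map the scatterlist for DMA themselves.
 */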
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_desc;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
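
For context, here is a minimal sketch of how a driver of this vintage would plug these ops into a videobuf2 queue. The names my_queue_init, my_qops and my_buffer are hypothetical placeholders; vb2_queue_init() and the vb2_dma_sg_plane_desc() helper from media/videobuf2-dma-sg.h are the real entry points.

/* Hypothetical driver fragment, not part of this file. */
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

static int my_queue_init(struct vb2_queue *q, void *drv_priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;	/* both modes handled above */
	q->drv_priv = drv_priv;
	q->ops = &my_qops;			/* driver's vb2_ops, defined elsewhere */
	q->mem_ops = &vb2_dma_sg_memops;	/* use this allocator */
	q->buf_struct_size = sizeof(struct my_buffer);

	return vb2_queue_init(q);
}

Once a buffer is queued, the driver would typically fetch the descriptor via the cookie helper, e.g. struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, 0), and DMA-map desc->sglist (desc->num_pages entries) itself, since this allocator only pins and describes the pages.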