videobuf2-vmalloc.c
  1. /*
  2. * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation.
  11. */
  12. #include <linux/io.h>
  13. #include <linux/module.h>
  14. #include <linux/mm.h>
  15. #include <linux/sched.h>
  16. #include <linux/slab.h>
  17. #include <linux/vmalloc.h>
  18. #include <media/videobuf2-core.h>
  19. #include <media/videobuf2-vmalloc.h>
  20. #include <media/videobuf2-memops.h>
  21. struct vb2_vmalloc_buf {
  22. void *vaddr;
  23. struct page **pages;
  24. struct vm_area_struct *vma;
  25. int write;
  26. unsigned long size;
  27. unsigned int n_pages;
  28. atomic_t refcount;
  29. struct vb2_vmarea_handler handler;
  30. struct dma_buf *dbuf;
  31. };
  32. static void vb2_vmalloc_put(void *buf_priv);
  33. static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
  34. {
  35. struct vb2_vmalloc_buf *buf;
  36. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  37. if (!buf)
  38. return NULL;
  39. buf->size = size;
  40. buf->vaddr = vmalloc_user(buf->size);
  41. buf->handler.refcount = &buf->refcount;
  42. buf->handler.put = vb2_vmalloc_put;
  43. buf->handler.arg = buf;
  44. if (!buf->vaddr) {
  45. pr_debug("vmalloc of size %ld failed\n", buf->size);
  46. kfree(buf);
  47. return NULL;
  48. }
  49. atomic_inc(&buf->refcount);
  50. return buf;
  51. }
  52. static void vb2_vmalloc_put(void *buf_priv)
  53. {
  54. struct vb2_vmalloc_buf *buf = buf_priv;
  55. if (atomic_dec_and_test(&buf->refcount)) {
  56. vfree(buf->vaddr);
  57. kfree(buf);
  58. }
  59. }
/*
 * vb2_vmalloc_get_userptr() - wrap a userspace buffer for kernel access.
 * @alloc_ctx:	unused by this allocator
 * @vaddr:	userspace virtual address of the buffer (need not be
 *		page-aligned; the sub-page offset is preserved)
 * @size:	buffer size in bytes
 * @write:	non-zero if the kernel will write into the buffer
 *
 * Two cases:
 *  - VM_PFNMAP VMA (e.g. reserved/contig memory with no struct pages):
 *    resolve the physical address and ioremap it.
 *  - Regular memory: pin the pages with get_user_pages() and map them
 *    contiguously into the kernel with vm_map_ram().
 *
 * Returns an opaque buffer handle, or NULL on failure.
 * NOTE(review): caller must hold current->mm->mmap_sem (see comment
 * below); find_vma() is only safe under that lock.
 */
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->write = write;
	/* Sub-page offset of the user pointer, re-applied to the kernel map. */
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		/* Contiguous physical memory without struct pages: ioremap. */
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;	/* reference held by vb2_get_contig_userptr() */
		buf->vaddr = ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		/* Page-backed memory: pin every touched page. */
		first = vaddr >> PAGE_SHIFT;
		last = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 write, 1, /* force */
					 buf->pages, NULL);
		/* Partial pin counts as failure; unwind what was pinned. */
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	/* Make buf->vaddr point at the exact byte the user pointer named. */
	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
		 buf->n_pages);
	/* n_pages may be < buf->n_pages on partial pin; release only those. */
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}
/*
 * vb2_vmalloc_put_userptr() - undo vb2_vmalloc_get_userptr().
 *
 * Page-backed case (buf->pages set): unmap the vm_map_ram() mapping,
 * mark pages dirty if the buffer was writable, unpin them and free the
 * page array.  PFNMAP case: drop the VMA reference and iounmap.
 */
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	/* Strip the sub-page offset added in get_userptr to recover the
	 * address vm_map_ram() returned. */
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->write)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		if (buf->vma)
			vb2_put_vma(buf->vma);
		/* NOTE(review): buf->vaddr still carries the sub-page offset
		 * added in get_userptr; iounmap() presumably expects the
		 * original ioremap cookie — verify for non-page-aligned
		 * user pointers. */
		iounmap(buf->vaddr);
	}
	kfree(buf);
}
  135. static void *vb2_vmalloc_vaddr(void *buf_priv)
  136. {
  137. struct vb2_vmalloc_buf *buf = buf_priv;
  138. if (!buf->vaddr) {
  139. pr_err("Address of an unallocated plane requested "
  140. "or cannot map user pointer\n");
  141. return NULL;
  142. }
  143. return buf->vaddr;
  144. }
  145. static unsigned int vb2_vmalloc_num_users(void *buf_priv)
  146. {
  147. struct vb2_vmalloc_buf *buf = buf_priv;
  148. return atomic_read(&buf->refcount);
  149. }
  150. static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
  151. {
  152. struct vb2_vmalloc_buf *buf = buf_priv;
  153. int ret;
  154. if (!buf) {
  155. pr_err("No memory to map\n");
  156. return -EINVAL;
  157. }
  158. ret = remap_vmalloc_range(vma, buf->vaddr, 0);
  159. if (ret) {
  160. pr_err("Remapping vmalloc memory, error: %d\n", ret);
  161. return ret;
  162. }
  163. /*
  164. * Make sure that vm_areas for 2 buffers won't be merged together
  165. */
  166. vma->vm_flags |= VM_DONTEXPAND;
  167. /*
  168. * Use common vm_area operations to track buffer refcount.
  169. */
  170. vma->vm_private_data = &buf->handler;
  171. vma->vm_ops = &vb2_common_vm_ops;
  172. vma->vm_ops->open(vma);
  173. return 0;
  174. }
  175. /*********************************************/
  176. /* callbacks for DMABUF buffers */
  177. /*********************************************/
  178. static int vb2_vmalloc_map_dmabuf(void *mem_priv)
  179. {
  180. struct vb2_vmalloc_buf *buf = mem_priv;
  181. buf->vaddr = dma_buf_vmap(buf->dbuf);
  182. return buf->vaddr ? 0 : -EFAULT;
  183. }
  184. static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
  185. {
  186. struct vb2_vmalloc_buf *buf = mem_priv;
  187. dma_buf_vunmap(buf->dbuf, buf->vaddr);
  188. buf->vaddr = NULL;
  189. }
  190. static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
  191. {
  192. struct vb2_vmalloc_buf *buf = mem_priv;
  193. if (buf->vaddr)
  194. dma_buf_vunmap(buf->dbuf, buf->vaddr);
  195. kfree(buf);
  196. }
  197. static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
  198. unsigned long size, int write)
  199. {
  200. struct vb2_vmalloc_buf *buf;
  201. if (dbuf->size < size)
  202. return ERR_PTR(-EFAULT);
  203. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  204. if (!buf)
  205. return ERR_PTR(-ENOMEM);
  206. buf->dbuf = dbuf;
  207. buf->write = write;
  208. buf->size = size;
  209. return buf;
  210. }
  211. const struct vb2_mem_ops vb2_vmalloc_memops = {
  212. .alloc = vb2_vmalloc_alloc,
  213. .put = vb2_vmalloc_put,
  214. .get_userptr = vb2_vmalloc_get_userptr,
  215. .put_userptr = vb2_vmalloc_put_userptr,
  216. .map_dmabuf = vb2_vmalloc_map_dmabuf,
  217. .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
  218. .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
  219. .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
  220. .vaddr = vb2_vmalloc_vaddr,
  221. .mmap = vb2_vmalloc_mmap,
  222. .num_users = vb2_vmalloc_num_users,
  223. };
  224. EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
/* Standard module metadata. */
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");