  1. /*
  2. * videobuf2-memops.c - generic memory handling routines for videobuf2
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation.
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/module.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/mm.h>
  18. #include <linux/sched.h>
  19. #include <linux/file.h>
  20. #include <media/videobuf2-core.h>
  21. #include <media/videobuf2-memops.h>
  22. /**
  23. * vb2_get_vma() - acquire and lock the virtual memory area
  24. * @vma: given virtual memory area
  25. *
  26. * This function attempts to acquire an area mapped in the userspace for
  27. * the duration of a hardware operation. The area is "locked" by performing
  28. * the same set of operation that are done when process calls fork() and
  29. * memory areas are duplicated.
  30. *
  31. * Returns a copy of a virtual memory region on success or NULL.
  32. */
  33. struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
  34. {
  35. struct vm_area_struct *vma_copy;
  36. vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
  37. if (vma_copy == NULL)
  38. return NULL;
  39. if (vma->vm_ops && vma->vm_ops->open)
  40. vma->vm_ops->open(vma);
  41. if (vma->vm_file)
  42. get_file(vma->vm_file);
  43. memcpy(vma_copy, vma, sizeof(*vma));
  44. vma_copy->vm_mm = NULL;
  45. vma_copy->vm_next = NULL;
  46. vma_copy->vm_prev = NULL;
  47. return vma_copy;
  48. }
  49. /**
  50. * vb2_put_userptr() - release a userspace virtual memory area
  51. * @vma: virtual memory region associated with the area to be released
  52. *
  53. * This function releases the previously acquired memory area after a hardware
  54. * operation.
  55. */
  56. void vb2_put_vma(struct vm_area_struct *vma)
  57. {
  58. if (!vma)
  59. return;
  60. if (vma->vm_ops && vma->vm_ops->close)
  61. vma->vm_ops->close(vma);
  62. if (vma->vm_file)
  63. fput(vma->vm_file);
  64. kfree(vma);
  65. }
  66. EXPORT_SYMBOL_GPL(vb2_put_vma);
  67. /**
  68. * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
  69. * @vaddr: starting virtual address of the area to be verified
  70. * @size: size of the area
  71. * @res_paddr: will return physical address for the given vaddr
  72. * @res_vma: will return locked copy of struct vm_area for the given area
  73. *
  74. * This function will go through memory area of size @size mapped at @vaddr and
  75. * verify that the underlying physical pages are contiguous. If they are
  76. * contiguous the virtual memory area is locked and a @res_vma is filled with
  77. * the copy and @res_pa set to the physical address of the buffer.
  78. *
  79. * Returns 0 on success.
  80. */
  81. int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
  82. struct vm_area_struct **res_vma, dma_addr_t *res_pa)
  83. {
  84. struct mm_struct *mm = current->mm;
  85. struct vm_area_struct *vma;
  86. unsigned long offset, start, end;
  87. unsigned long this_pfn, prev_pfn;
  88. dma_addr_t pa = 0;
  89. int ret = -EFAULT;
  90. start = vaddr;
  91. offset = start & ~PAGE_MASK;
  92. end = start + size;
  93. down_read(&mm->mmap_sem);
  94. vma = find_vma(mm, start);
  95. if (vma == NULL || vma->vm_end < end)
  96. goto done;
  97. for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
  98. ret = follow_pfn(vma, start, &this_pfn);
  99. if (ret)
  100. goto done;
  101. if (prev_pfn == 0)
  102. pa = this_pfn << PAGE_SHIFT;
  103. else if (this_pfn != prev_pfn + 1) {
  104. ret = -EFAULT;
  105. goto done;
  106. }
  107. prev_pfn = this_pfn;
  108. }
  109. /*
  110. * Memory is contigous, lock vma and return to the caller
  111. */
  112. *res_vma = vb2_get_vma(vma);
  113. if (*res_vma == NULL) {
  114. ret = -ENOMEM;
  115. goto done;
  116. }
  117. *res_pa = pa + offset;
  118. ret = 0;
  119. done:
  120. up_read(&mm->mmap_sem);
  121. return ret;
  122. }
  123. EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
  124. /**
  125. * vb2_mmap_pfn_range() - map physical pages to userspace
  126. * @vma: virtual memory region for the mapping
  127. * @paddr: starting physical address of the memory to be mapped
  128. * @size: size of the memory to be mapped
  129. * @vm_ops: vm operations to be assigned to the created area
  130. * @priv: private data to be associated with the area
  131. *
  132. * Returns 0 on success.
  133. */
  134. int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
  135. unsigned long size,
  136. const struct vm_operations_struct *vm_ops,
  137. void *priv)
  138. {
  139. int ret;
  140. size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
  141. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  142. ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
  143. size, vma->vm_page_prot);
  144. if (ret) {
  145. printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
  146. return ret;
  147. }
  148. vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
  149. vma->vm_private_data = priv;
  150. vma->vm_ops = vm_ops;
  151. vma->vm_ops->open(vma);
  152. pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
  153. __func__, paddr, vma->vm_start, size);
  154. return 0;
  155. }
  156. EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
  157. /**
  158. * vb2_common_vm_open() - increase refcount of the vma
  159. * @vma: virtual memory region for the mapping
  160. *
  161. * This function adds another user to the provided vma. It expects
  162. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  163. */
  164. static void vb2_common_vm_open(struct vm_area_struct *vma)
  165. {
  166. struct vb2_vmarea_handler *h = vma->vm_private_data;
  167. pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  168. __func__, h, atomic_read(h->refcount), vma->vm_start,
  169. vma->vm_end);
  170. atomic_inc(h->refcount);
  171. }
  172. /**
  173. * vb2_common_vm_close() - decrease refcount of the vma
  174. * @vma: virtual memory region for the mapping
  175. *
  176. * This function releases the user from the provided vma. It expects
  177. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  178. */
  179. static void vb2_common_vm_close(struct vm_area_struct *vma)
  180. {
  181. struct vb2_vmarea_handler *h = vma->vm_private_data;
  182. pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  183. __func__, h, atomic_read(h->refcount), vma->vm_start,
  184. vma->vm_end);
  185. h->put(h->arg);
  186. }
  187. /**
  188. * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
  189. * video buffers
  190. */
  191. const struct vm_operations_struct vb2_common_vm_ops = {
  192. .open = vb2_common_vm_open,
  193. .close = vb2_common_vm_close,
  194. };
  195. EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
  196. MODULE_DESCRIPTION("common memory handling routines for videobuf2");
  197. MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
  198. MODULE_LICENSE("GPL");