/* videobuf2-memops.c */
  1. /*
  2. * videobuf2-memops.c - generic memory handling routines for videobuf2
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation.
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/module.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/mm.h>
  18. #include <linux/sched.h>
  19. #include <linux/file.h>
  20. #include <linux/slab.h>
  21. #include <media/videobuf2-core.h>
  22. #include <media/videobuf2-memops.h>
  23. /**
  24. * vb2_get_vma() - acquire and lock the virtual memory area
  25. * @vma: given virtual memory area
  26. *
  27. * This function attempts to acquire an area mapped in the userspace for
  28. * the duration of a hardware operation. The area is "locked" by performing
  29. * the same set of operation that are done when process calls fork() and
  30. * memory areas are duplicated.
  31. *
  32. * Returns a copy of a virtual memory region on success or NULL.
  33. */
  34. struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
  35. {
  36. struct vm_area_struct *vma_copy;
  37. vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
  38. if (vma_copy == NULL)
  39. return NULL;
  40. if (vma->vm_ops && vma->vm_ops->open)
  41. vma->vm_ops->open(vma);
  42. if (vma->vm_file)
  43. get_file(vma->vm_file);
  44. memcpy(vma_copy, vma, sizeof(*vma));
  45. vma_copy->vm_mm = NULL;
  46. vma_copy->vm_next = NULL;
  47. vma_copy->vm_prev = NULL;
  48. return vma_copy;
  49. }
/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma: virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.
 */
void vb2_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	/* Drop the file reference taken by get_file() in vb2_get_vma(). */
	if (vma->vm_file)
		fput(vma->vm_file);

	/* Undo the open() performed when the area was acquired. */
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr: starting virtual address of the area to be verified
 * @size: size of the area
 * @res_vma: will return locked copy of struct vm_area for the given area
 * @res_pa: will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = vaddr;
	/* Offset of the buffer within its first page. */
	offset = start & ~PAGE_MASK;
	end = start + size;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	/* The whole [vaddr, vaddr + size) range must lie in a single vma. */
	if (vma == NULL || vma->vm_end < end)
		goto done;

	/* Walk the range page by page and check the pfns are consecutive. */
	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		if (prev_pfn == 0)
			/* First page: remember the base physical address. */
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			/* Non-consecutive pfn - the buffer is not contiguous. */
			ret = -EFAULT;
			goto done;
		}
		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	/* Physical address of the first byte, not just the first page. */
	*res_pa = pa + offset;
	ret = 0;

done:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
/**
 * vb2_mmap_pfn_range() - map physical pages to userspace
 * @vma: virtual memory region for the mapping
 * @paddr: starting physical address of the memory to be mapped
 * @size: size of the memory to be mapped
 * @vm_ops: vm operations to be assigned to the created area
 * @priv: private data to be associated with the area
 *
 * Returns 0 on success.
 */
int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
		       unsigned long size,
		       const struct vm_operations_struct *vm_ops,
		       void *priv)
{
	int ret;

	/* Never map more than the vma can hold. */
	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);

	/* Uncached mapping; the pages are then mapped in one go. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	if (ret) {
		printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	/* Keep the area pinned: no expansion on mremap, no swap-out. */
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;

	vma->vm_private_data = priv;
	vma->vm_ops = vm_ops;

	/*
	 * NOTE(review): open is called unconditionally - assumes @vm_ops
	 * provides a non-NULL open() (vb2_common_vm_ops does); confirm for
	 * any other caller.
	 */
	vma->vm_ops->open(vma);

	printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
			__func__, paddr, vma->vm_start, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
  158. /**
  159. * vb2_common_vm_open() - increase refcount of the vma
  160. * @vma: virtual memory region for the mapping
  161. *
  162. * This function adds another user to the provided vma. It expects
  163. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  164. */
  165. static void vb2_common_vm_open(struct vm_area_struct *vma)
  166. {
  167. struct vb2_vmarea_handler *h = vma->vm_private_data;
  168. printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  169. __func__, h, atomic_read(h->refcount), vma->vm_start,
  170. vma->vm_end);
  171. atomic_inc(h->refcount);
  172. }
  173. /**
  174. * vb2_common_vm_close() - decrease refcount of the vma
  175. * @vma: virtual memory region for the mapping
  176. *
  177. * This function releases the user from the provided vma. It expects
  178. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  179. */
  180. static void vb2_common_vm_close(struct vm_area_struct *vma)
  181. {
  182. struct vb2_vmarea_handler *h = vma->vm_private_data;
  183. printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  184. __func__, h, atomic_read(h->refcount), vma->vm_start,
  185. vma->vm_end);
  186. h->put(h->arg);
  187. }
/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");