videobuf2-memops.c (6.0 KB)
  1. /*
  2. * videobuf2-memops.c - generic memory handling routines for videobuf2
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <p.osciak@samsung.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation.
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/module.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/mm.h>
  18. #include <linux/sched.h>
  19. #include <linux/file.h>
  20. #include <linux/slab.h>
  21. #include <media/videobuf2-core.h>
  22. #include <media/videobuf2-memops.h>
  23. /**
  24. * vb2_get_vma() - acquire and lock the virtual memory area
  25. * @vma: given virtual memory area
  26. *
  27. * This function attempts to acquire an area mapped in the userspace for
  28. * the duration of a hardware operation. The area is "locked" by performing
  29. * the same set of operation that are done when process calls fork() and
  30. * memory areas are duplicated.
  31. *
  32. * Returns a copy of a virtual memory region on success or NULL.
  33. */
  34. struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
  35. {
  36. struct vm_area_struct *vma_copy;
  37. vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
  38. if (vma_copy == NULL)
  39. return NULL;
  40. if (vma->vm_ops && vma->vm_ops->open)
  41. vma->vm_ops->open(vma);
  42. if (vma->vm_file)
  43. get_file(vma->vm_file);
  44. memcpy(vma_copy, vma, sizeof(*vma));
  45. vma_copy->vm_mm = NULL;
  46. vma_copy->vm_next = NULL;
  47. vma_copy->vm_prev = NULL;
  48. return vma_copy;
  49. }
  50. /**
  51. * vb2_put_userptr() - release a userspace virtual memory area
  52. * @vma: virtual memory region associated with the area to be released
  53. *
  54. * This function releases the previously acquired memory area after a hardware
  55. * operation.
  56. */
  57. void vb2_put_vma(struct vm_area_struct *vma)
  58. {
  59. if (!vma)
  60. return;
  61. if (vma->vm_file)
  62. fput(vma->vm_file);
  63. if (vma->vm_ops && vma->vm_ops->close)
  64. vma->vm_ops->close(vma);
  65. kfree(vma);
  66. }
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 * @res_pa:	will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success, -EFAULT if the area is not a single contiguous
 * mapping, -ENOMEM if the vma copy cannot be allocated.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
		struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = vaddr;
	/* sub-page offset: the buffer need not start page-aligned */
	offset = start & ~PAGE_MASK;
	end = start + size;

	/* hold mmap_sem for reading so the vma cannot change under us */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);

	/* the whole [start, end) range must lie inside a single vma */
	if (vma == NULL || vma->vm_end < end)
		goto done;

	/* walk the range page by page, checking pfn contiguity */
	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		if (prev_pfn == 0)
			/*
			 * First page: record the base physical address.
			 * NOTE(review): pfn 0 is used as the "no previous
			 * page" sentinel — assumes a buffer never actually
			 * starts at pfn 0; confirm for the target platforms.
			 */
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			/* gap found: pages are not physically contiguous */
			ret = -EFAULT;
			goto done;
		}
		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	/* add back the sub-page offset stripped above */
	*res_pa = pa + offset;
	ret = 0;
done:
	up_read(&mm->mmap_sem);
	return ret;
}
  123. /**
  124. * vb2_mmap_pfn_range() - map physical pages to userspace
  125. * @vma: virtual memory region for the mapping
  126. * @paddr: starting physical address of the memory to be mapped
  127. * @size: size of the memory to be mapped
  128. * @vm_ops: vm operations to be assigned to the created area
  129. * @priv: private data to be associated with the area
  130. *
  131. * Returns 0 on success.
  132. */
  133. int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
  134. unsigned long size,
  135. const struct vm_operations_struct *vm_ops,
  136. void *priv)
  137. {
  138. int ret;
  139. size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
  140. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  141. ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
  142. size, vma->vm_page_prot);
  143. if (ret) {
  144. printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
  145. return ret;
  146. }
  147. vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
  148. vma->vm_private_data = priv;
  149. vma->vm_ops = vm_ops;
  150. vma->vm_ops->open(vma);
  151. printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
  152. __func__, paddr, vma->vm_start, size);
  153. return 0;
  154. }
  155. /**
  156. * vb2_common_vm_open() - increase refcount of the vma
  157. * @vma: virtual memory region for the mapping
  158. *
  159. * This function adds another user to the provided vma. It expects
  160. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  161. */
  162. static void vb2_common_vm_open(struct vm_area_struct *vma)
  163. {
  164. struct vb2_vmarea_handler *h = vma->vm_private_data;
  165. printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  166. __func__, h, atomic_read(h->refcount), vma->vm_start,
  167. vma->vm_end);
  168. atomic_inc(h->refcount);
  169. }
  170. /**
  171. * vb2_common_vm_close() - decrease refcount of the vma
  172. * @vma: virtual memory region for the mapping
  173. *
  174. * This function releases the user from the provided vma. It expects
  175. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  176. */
  177. static void vb2_common_vm_close(struct vm_area_struct *vma)
  178. {
  179. struct vb2_vmarea_handler *h = vma->vm_private_data;
  180. printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  181. __func__, h, atomic_read(h->refcount), vma->vm_start,
  182. vma->vm_end);
  183. h->put(h->arg);
  184. }
/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
 * video buffers
 *
 * Installed by memory allocators on mmap; open/close keep the refcount in
 * the vb2_vmarea_handler stored in vma->vm_private_data up to date.
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak");
MODULE_LICENSE("GPL");