  1. /*
  2. * videobuf2-memops.c - generic memory handling routines for videobuf2
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation.
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/module.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/mm.h>
  18. #include <linux/sched.h>
  19. #include <linux/file.h>
  20. #include <media/videobuf2-core.h>
  21. #include <media/videobuf2-memops.h>
  22. /**
  23. * vb2_get_vma() - acquire and lock the virtual memory area
  24. * @vma: given virtual memory area
  25. *
  26. * This function attempts to acquire an area mapped in the userspace for
  27. * the duration of a hardware operation. The area is "locked" by performing
  28. * the same set of operation that are done when process calls fork() and
  29. * memory areas are duplicated.
  30. *
  31. * Returns a copy of a virtual memory region on success or NULL.
  32. */
  33. struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
  34. {
  35. struct vm_area_struct *vma_copy;
  36. vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
  37. if (vma_copy == NULL)
  38. return NULL;
  39. if (vma->vm_ops && vma->vm_ops->open)
  40. vma->vm_ops->open(vma);
  41. if (vma->vm_file)
  42. get_file(vma->vm_file);
  43. memcpy(vma_copy, vma, sizeof(*vma));
  44. vma_copy->vm_mm = NULL;
  45. vma_copy->vm_next = NULL;
  46. vma_copy->vm_prev = NULL;
  47. return vma_copy;
  48. }
  49. /**
  50. * vb2_put_userptr() - release a userspace virtual memory area
  51. * @vma: virtual memory region associated with the area to be released
  52. *
  53. * This function releases the previously acquired memory area after a hardware
  54. * operation.
  55. */
  56. void vb2_put_vma(struct vm_area_struct *vma)
  57. {
  58. if (!vma)
  59. return;
  60. if (vma->vm_ops && vma->vm_ops->close)
  61. vma->vm_ops->close(vma);
  62. if (vma->vm_file)
  63. fput(vma->vm_file);
  64. kfree(vma);
  65. }
  66. EXPORT_SYMBOL_GPL(vb2_put_vma);
  67. /**
  68. * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
  69. * @vaddr: starting virtual address of the area to be verified
  70. * @size: size of the area
  71. * @res_paddr: will return physical address for the given vaddr
  72. * @res_vma: will return locked copy of struct vm_area for the given area
  73. *
  74. * This function will go through memory area of size @size mapped at @vaddr and
  75. * verify that the underlying physical pages are contiguous. If they are
  76. * contiguous the virtual memory area is locked and a @res_vma is filled with
  77. * the copy and @res_pa set to the physical address of the buffer.
  78. *
  79. * Returns 0 on success.
  80. */
  81. int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
  82. struct vm_area_struct **res_vma, dma_addr_t *res_pa)
  83. {
  84. struct mm_struct *mm = current->mm;
  85. struct vm_area_struct *vma;
  86. unsigned long offset, start, end;
  87. unsigned long this_pfn, prev_pfn;
  88. dma_addr_t pa = 0;
  89. start = vaddr;
  90. offset = start & ~PAGE_MASK;
  91. end = start + size;
  92. vma = find_vma(mm, start);
  93. if (vma == NULL || vma->vm_end < end)
  94. return -EFAULT;
  95. for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
  96. int ret = follow_pfn(vma, start, &this_pfn);
  97. if (ret)
  98. return ret;
  99. if (prev_pfn == 0)
  100. pa = this_pfn << PAGE_SHIFT;
  101. else if (this_pfn != prev_pfn + 1)
  102. return -EFAULT;
  103. prev_pfn = this_pfn;
  104. }
  105. /*
  106. * Memory is contigous, lock vma and return to the caller
  107. */
  108. *res_vma = vb2_get_vma(vma);
  109. if (*res_vma == NULL)
  110. return -ENOMEM;
  111. *res_pa = pa + offset;
  112. return 0;
  113. }
  114. EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
  115. /**
  116. * vb2_mmap_pfn_range() - map physical pages to userspace
  117. * @vma: virtual memory region for the mapping
  118. * @paddr: starting physical address of the memory to be mapped
  119. * @size: size of the memory to be mapped
  120. * @vm_ops: vm operations to be assigned to the created area
  121. * @priv: private data to be associated with the area
  122. *
  123. * Returns 0 on success.
  124. */
  125. int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
  126. unsigned long size,
  127. const struct vm_operations_struct *vm_ops,
  128. void *priv)
  129. {
  130. int ret;
  131. size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
  132. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  133. ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
  134. size, vma->vm_page_prot);
  135. if (ret) {
  136. printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
  137. return ret;
  138. }
  139. vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
  140. vma->vm_private_data = priv;
  141. vma->vm_ops = vm_ops;
  142. vma->vm_ops->open(vma);
  143. pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
  144. __func__, paddr, vma->vm_start, size);
  145. return 0;
  146. }
  147. EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
  148. /**
  149. * vb2_common_vm_open() - increase refcount of the vma
  150. * @vma: virtual memory region for the mapping
  151. *
  152. * This function adds another user to the provided vma. It expects
  153. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  154. */
  155. static void vb2_common_vm_open(struct vm_area_struct *vma)
  156. {
  157. struct vb2_vmarea_handler *h = vma->vm_private_data;
  158. pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  159. __func__, h, atomic_read(h->refcount), vma->vm_start,
  160. vma->vm_end);
  161. atomic_inc(h->refcount);
  162. }
  163. /**
  164. * vb2_common_vm_close() - decrease refcount of the vma
  165. * @vma: virtual memory region for the mapping
  166. *
  167. * This function releases the user from the provided vma. It expects
  168. * struct vb2_vmarea_handler pointer in vma->vm_private_data.
  169. */
  170. static void vb2_common_vm_close(struct vm_area_struct *vma)
  171. {
  172. struct vb2_vmarea_handler *h = vma->vm_private_data;
  173. pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
  174. __func__, h, atomic_read(h->refcount), vma->vm_start,
  175. vma->vm_end);
  176. h->put(h->arg);
  177. }
  178. /**
  179. * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
  180. * video buffers
  181. */
  182. const struct vm_operations_struct vb2_common_vm_ops = {
  183. .open = vb2_common_vm_open,
  184. .close = vb2_common_vm_close,
  185. };
  186. EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
  187. MODULE_DESCRIPTION("common memory handling routines for videobuf2");
  188. MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
  189. MODULE_LICENSE("GPL");