/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "omap_drv.h"

#include <linux/dma-buf.h>

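/*
 * Exporter-side map: pin the buffer to get a physically contiguous
 * backing, then describe that one contiguous region with a
 * single-entry scatterlist.
 */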
static struct sg_table *omap_gem_map_dma_buf(
                struct dma_buf_attachment *attachment,
                enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attachment->dmabuf->priv;
        struct sg_table *sg;
        dma_addr_t paddr;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);

        /* camera, etc, need physically contiguous.. but we need a
         * better way to know this..
         */
        ret = omap_gem_get_paddr(obj, &paddr, true);
        if (ret)
                goto out;

        ret = sg_alloc_table(sg, 1, GFP_KERNEL);
        if (ret)
                goto put_paddr;

        sg_init_table(sg->sgl, 1);
        sg_dma_len(sg->sgl) = obj->size;
        sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
        sg_dma_address(sg->sgl) = paddr;

        /* this should be after _get_paddr() to ensure we have pages attached */
        omap_gem_dma_sync(obj, dir);

        return sg;

put_paddr:
        /* don't leak the pin taken above if building the table fails */
        omap_gem_put_paddr(obj);
out:
        kfree(sg);
        return ERR_PTR(ret);
}

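/* Exporter-side unmap: undo the pin taken in map_dma_buf and free the table. */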
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                struct sg_table *sg, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attachment->dmabuf->priv;

        omap_gem_put_paddr(obj);
        sg_free_table(sg);
        kfree(sg);
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
        struct drm_gem_object *obj = buffer->priv;

        /* release the reference that was taken when the dmabuf
         * was exported..
         */
        drm_gem_object_unreference_unlocked(obj);
}

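/*
 * Ensure backing pages are attached before the importer starts CPU
 * access. Tiled buffers would need to be (partially) pinned to give a
 * de-tiled view, which is not supported yet.
 */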
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
                size_t start, size_t len, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;

        if (omap_gem_flags(obj) & OMAP_BO_TILED) {
                /* TODO we would need to pin at least part of the buffer to
                 * get de-tiled view. For now just reject it.
                 */
                return -ENOMEM;
        }

        /* make sure we have the pages: */
        return omap_gem_get_pages(obj, &pages, true);
}

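/* Drop the pages reference taken in begin_cpu_access. */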
static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
                size_t start, size_t len, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = buffer->priv;

        omap_gem_put_pages(obj);
}

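/*
 * Per-page CPU mappings. Each kmap helper syncs the page for CPU
 * access before mapping it; the importer is expected to have called
 * begin_cpu_access first, so the backing pages are already attached.
 */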
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
                unsigned long page_num)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;

        omap_gem_get_pages(obj, &pages, false);
        omap_gem_cpu_sync(obj, page_num);
        return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
                unsigned long page_num, void *addr)
{
        kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
                unsigned long page_num)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;

        omap_gem_get_pages(obj, &pages, false);
        omap_gem_cpu_sync(obj, page_num);
        return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
                unsigned long page_num, void *addr)
{
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;

        omap_gem_get_pages(obj, &pages, false);
        kunmap(pages[page_num]);
}

/*
 * TODO maybe we can split up drm_gem_mmap to avoid duplicating
 * some here.. or at least have a drm_dmabuf_mmap helper.
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
                struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = buffer->priv;

        if (WARN_ON(!obj->filp))
                return -EINVAL;

        /* Check for valid size. */
        if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        vma->vm_ops->open(vma);

        return omap_gem_mmap_obj(obj, vma);
}

static struct dma_buf_ops omap_dmabuf_ops = {
        .map_dma_buf = omap_gem_map_dma_buf,
        .unmap_dma_buf = omap_gem_unmap_dma_buf,
        .release = omap_gem_dmabuf_release,
        .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
        .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
        .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
        .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
        .kmap = omap_gem_dmabuf_kmap,
        .kunmap = omap_gem_dmabuf_kunmap,
        .mmap = omap_gem_dmabuf_mmap,
};

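/*
 * Export a GEM object as a dma-buf; called from the DRM PRIME layer.
 * The dma-buf holds a reference to the object, dropped again in
 * omap_gem_dmabuf_release().
 */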
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
                struct drm_gem_object *obj, int flags)
{
        return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
}

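/*
 * Import a dma-buf as a GEM object. Only self-import (a buffer this
 * same device exported) is handled; buffers from other exporters are
 * rejected for now.
 */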
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer)
{
        struct drm_gem_object *obj;

        /* is this one of our own objects? */
        if (buffer->ops == &omap_dmabuf_ops) {
                obj = buffer->priv;
                /* is it from our device? */
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        dma_buf_put(buffer);
                        return obj;
                }
        }

        /*
         * TODO add support for importing buffers from other devices..
         * for now we don't need this but would be nice to add eventually
         */
        return ERR_PTR(-EINVAL);
}