i915_gem_dmabuf.c

/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include "drmP.h"
#include "i915_drv.h"
#include <linux/dma-buf.h>
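
/*
 * Exporter callback for dma_buf_map_attachment(): build an independent
 * copy of the object's scatterlist for this attachment, DMA-map it for
 * the importing device, and pin the backing pages for the lifetime of
 * the mapping.
 */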
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
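
/*
 * Inverse of i915_gem_map_dma_buf(): unmap and free the attachment's
 * private scatterlist copy, and drop the page pin taken at map time.
 */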
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	/* Balance the i915_gem_object_pin_pages() taken in map_dma_buf */
	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
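
/*
 * Called when the last reference to the exported dma-buf goes away:
 * drop the GEM reference that was taken on behalf of the exporting fd.
 */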
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}
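
/*
 * Map the entire object into the kernel's address space. The mapping is
 * reference counted, so repeated vmap calls share a single vmapping and
 * the backing pages stay pinned until the last vunmap.
 */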
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *sg;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
	if (pages == NULL)
		goto error;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
		pages[i] = sg_page(sg);

	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
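
/* Drop one vmap reference; tear down the mapping on the last vunmap. */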
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
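
/*
 * Page-granular kernel CPU access is not supported by this exporter;
 * importers needing CPU access must use vmap instead.
 */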
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
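
/* Userspace mmap of the dma-buf is likewise not supported. */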
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
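
/*
 * Prepare the object for CPU access by the importer: moving it to the
 * CPU domain waits for outstanding GPU activity and flushes stale cache
 * lines as required.
 */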
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start,
				     size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};
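
/* Wrap a GEM object in a dma-buf so it can be exported to other devices. */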
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	/* Pass the caller's flags (e.g. O_CLOEXEC) through to the new file */
	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
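
/*
 * Import a dma-buf as a GEM object. If the buffer was exported by this
 * same device we can skip the attach/map machinery and simply take a
 * reference on the underlying GEM object.
 */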
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		kfree(obj);
		goto fail_unmap;
	}

	obj->has_dma_mapping = true;
	obj->sg_table = sg;
	obj->base.import_attach = attach;

	return &obj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
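
/*
 * Usage sketch: these entry points are hooked up in the driver's
 * struct drm_driver (in i915_drv.c), with the fd<->handle ioctls
 * handled by the drm core PRIME helpers. The exact wiring lives
 * outside this file and is shown here only as an illustration:
 *
 *	.driver_features = DRIVER_GEM | DRIVER_PRIME | ...,
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export = i915_gem_prime_export,
 *	.gem_prime_import = i915_gem_prime_import,
 */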