radeon_prime.c

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>

#include <linux/dma-buf.h>

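/*
 * PRIME buffer sharing for radeon.  Exporting pins the BO into GTT and
 * hands importers a scatter/gather table of its backing pages; importing
 * a foreign dma-buf maps the attachment and wraps the resulting sg_table
 * in a new GTT buffer object.
 */
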
/* Map the exported BO's backing pages into the importer's DMA address space. */
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	if (IS_ERR(sg)) {
		mutex_unlock(&dev->struct_mutex);
		return sg;
	}
	dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

/* per-page kmap is not implemented; CPU access goes through vmap below */
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

/* mapping the shared buffer into userspace is not supported */
static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

/*
 * Create (or reuse) a kernel mapping of the whole BO.  The mapping is
 * reference counted under struct_mutex so nested vmap calls share a
 * single ttm_bo_kmap mapping.
 */
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}

static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0)
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}

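/*
 * A minimal sketch of how an importer of this era would use the pair
 * above through the generic dma-buf interface (error handling omitted;
 * `dmabuf`, `dst` and `len` are assumed to be set up by the caller):
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memcpy(dst, vaddr, len);
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 */
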
static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};

/*
 * Wrap an imported sg_table in a new GTT buffer object and track it on
 * the device's list of GEM objects.
 */
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		radeon_bo_unreserve(bo);
		return ERR_PTR(ret);
	}
	radeon_bo_unreserve(bo);
	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}

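/*
 * Userspace reaches radeon_gem_prime_export() through the generic DRM
 * PRIME ioctl.  A minimal sketch, assuming an open DRM fd `fd` and a
 * valid GEM handle `handle` (error handling omitted):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now holds a dma-buf fd that can be passed to another driver
 */
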
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			/* importing our own dma-buf: just take a reference */
			drm_gem_object_reference(&bo->gem_base);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
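
/*
 * The import path is the mirror image: userspace passes a dma-buf fd to
 * the PRIME ioctl and gets back a GEM handle.  A minimal sketch, assuming
 * `dmabuf_fd` came from another device's export (error handling omitted):
 *
 *	struct drm_prime_handle args = { .fd = dmabuf_fd };
 *	ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	// args.handle is now a radeon GEM handle backed by the foreign pages
 */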