nouveau_prime.c

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>
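
/* Exporter callback: build an sg_table over the BO's TTM pages and
 * DMA-map it for the importing device.  The pages were pinned into
 * GTT at export time, so they cannot move underneath the importer.
 */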
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}
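
/* Exporter callback: undo nouveau_gem_map_dma_buf() by unmapping the
 * DMA addresses and freeing the sg_table.
 */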
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
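
/* Called when the last reference to the dma-buf goes away: drop the
 * GEM reference that was held for the export.
 */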
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}
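
/* CPU access via kmap/kmap_atomic is not supported; the four hooks
 * below are stubs that return NULL or do nothing.
 */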
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num,
				      void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
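
/* Userspace mmap of the dma-buf fd is not supported either. */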
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
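
/* Kernel virtual mapping of the whole buffer, refcounted under
 * struct_mutex so nested vmap/vunmap calls share one ttm_bo_kmap()
 * mapping.
 */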
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}
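
/* Drop one vmap reference; tear down the mapping when the count
 * reaches zero.
 */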
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0)
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}
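
/* Exporter vtable handed to dma_buf_export() below. */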
static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};
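
/* Wrap an imported sg_table in a new TTM buffer object plus the GEM
 * object that userspace will see.  Imported buffers live in GART only.
 */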
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = TTM_PL_FLAG_TT;
	int ret;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
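
/* Export: pin the buffer into GTT so its pages stay resident for the
 * lifetime of the dma-buf, then hand it to the dma-buf core.
 */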
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
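
/* Import: a dma-buf that we exported ourselves on the same device is
 * resolved to the existing GEM object; anything else is attached,
 * mapped, and wrapped in a fresh GART-backed BO.
 */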
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem && nvbo->gem->dev == dev) {
			drm_gem_object_reference(nvbo->gem);
			return nvbo->gem;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;
	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}