nouveau_prime.c

/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/dma-buf.h>

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_gem.h"
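
/*
 * map_dma_buf: build a scatter/gather table covering the buffer's TTM
 * backing pages and map it for the importing device. dev->struct_mutex
 * is held while the table is built from the page array.
 */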
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction dir)
{
        struct nouveau_bo *nvbo = attachment->dmabuf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int npages = nvbo->bo.num_pages;
        struct sg_table *sg;
        int nents;

        mutex_lock(&dev->struct_mutex);
        sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
        mutex_unlock(&dev->struct_mutex);
        return sg;
}
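
/*
 * unmap_dma_buf: reverse of the map callback; unmap the sg table from the
 * importer's device and free it.
 */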
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *sg,
                                      enum dma_data_direction dir)
{
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
}
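
/*
 * release: called when the last reference to the dma-buf goes away; drop
 * the GEM reference that was taken when the buffer was exported.
 */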
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;

        if (nvbo->gem->export_dma_buf == dma_buf) {
                nvbo->gem->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}
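
/*
 * CPU access through the kmap interfaces is not supported; the callbacks
 * below are stubs that return NULL or do nothing.
 */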
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf,
                                     unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf,
                                      unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
                               void *addr)
{
}
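
/* mmap of the exported buffer into userspace is likewise unsupported. */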
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf,
                                  struct vm_area_struct *vma)
{
        return -EINVAL;
}
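
/*
 * vmap: map the whole buffer into the kernel with ttm_bo_kmap() on first
 * use; subsequent callers just bump vmapping_count under dev->struct_mutex.
 */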
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        if (nvbo->vmapping_count) {
                nvbo->vmapping_count++;
                goto out_unlock;
        }

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
                          &nvbo->dma_buf_vmap);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(ret);
        }
        nvbo->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return nvbo->dma_buf_vmap.virtual;
}
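
/*
 * vunmap: drop one vmap reference and tear down the TTM kernel mapping
 * when the count reaches zero.
 */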
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;

        mutex_lock(&dev->struct_mutex);
        nvbo->vmapping_count--;
        if (nvbo->vmapping_count == 0)
                ttm_bo_kunmap(&nvbo->dma_buf_vmap);
        mutex_unlock(&dev->struct_mutex);
}
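
/* The exporter's vtable, handed to dma_buf_export() below. */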
static const struct dma_buf_ops nouveau_dmabuf_ops = {
        .map_dma_buf = nouveau_gem_map_dma_buf,
        .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
        .release = nouveau_gem_dmabuf_release,
        .kmap = nouveau_gem_kmap,
        .kmap_atomic = nouveau_gem_kmap_atomic,
        .kunmap = nouveau_gem_kunmap,
        .kunmap_atomic = nouveau_gem_kunmap_atomic,
        .mmap = nouveau_gem_prime_mmap,
        .vmap = nouveau_gem_prime_vmap,
        .vunmap = nouveau_gem_prime_vunmap,
};
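
/*
 * Wrap an imported sg table in a fresh nouveau_bo placed in GART, plus
 * the GEM object that userspace will see. Imported buffers are limited
 * to the GART domain (see valid_domains below).
 */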
static int
nouveau_prime_new(struct drm_device *dev,
                  size_t size,
                  struct sg_table *sg,
                  struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        flags = TTM_PL_FLAG_TT;

        ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
                             sg, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time. not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->gem->driver_private = nvbo;
        return 0;
}
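
/*
 * Export: pin the buffer into GTT so the exported pages stay resident
 * and mappable by importers, then hand the object to the dma-buf core.
 * Reached from userspace via the PRIME handle-to-fd ioctl.
 */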
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
                                         struct drm_gem_object *obj, int flags)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(obj);
        int ret = 0;

        /* pin buffer into GTT */
        ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
        if (ret)
                return ERR_PTR(-EINVAL);

        return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
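
/*
 * Import: if the dma-buf is one of our own exports on the same device,
 * just take another reference on the existing GEM object (self-import);
 * otherwise attach to the foreign buffer, map it, and wrap the resulting
 * sg table in a new nouveau_bo via nouveau_prime_new().
 */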
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct nouveau_bo *nvbo;
        int ret;

        if (dma_buf->ops == &nouveau_dmabuf_ops) {
                nvbo = dma_buf->priv;
                if (nvbo->gem) {
                        if (nvbo->gem->dev == dev) {
                                drm_gem_object_reference(nvbo->gem);
                                return nvbo->gem;
                        }
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(PTR_ERR(attach));

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
        if (ret)
                goto fail_unmap;

        nvbo->gem->import_attach = attach;

        return nvbo->gem;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}