i915_gem_dmabuf.c

/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include "drmP.h"
#include "i915_drv.h"
#include <linux/dma-buf.h>

struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
				      enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct drm_device *dev = obj->base.dev;
	int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *sg = NULL;
	int ret;
	int nents;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (!obj->pages) {
		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
		if (ret) {
			/* hand the error back as an ERR_PTR instead of
			 * returning the still-NULL sg pointer */
			sg = ERR_PTR(ret);
			goto out;
		}
	}

	/* link the pages into an SG table, then map it for the importer's device */
	sg = drm_prime_pages_to_sg(obj->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);

out:
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
			    struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference that the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}

/*
 * CPU access through kmap is not supported yet: the hooks below are stubs
 * that return NULL or do nothing.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
};

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
}

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct drm_i915_gem_object *obj;
	int npages;
	int size;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	size = dma_buf->size;
	npages = size / PAGE_SIZE;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, size);
	if (ret) {
		kfree(obj);
		goto fail_unmap;
	}

	obj->sg_table = sg;
	obj->base.import_attach = attach;

	return &obj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
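
These export/import hooks only take effect once they are registered in the driver's struct drm_driver alongside the generic PRIME handle/fd ioctl helpers from the DRM core. The fragment below is a minimal sketch of that wiring, modelled on how DRM drivers of this era enabled PRIME; the surrounding i915_drv.c context and all other callbacks are assumed and elided, so it is illustrative rather than part of this file.

	/* Sketch only: how the hooks above would be wired into the driver. */
	static struct drm_driver driver = {
		.driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,

		/* generic handle<->fd ioctl handlers provided by the DRM core */
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,

		/* driver-specific export/import implemented in this file */
		.gem_prime_export = i915_gem_prime_export,
		.gem_prime_import = i915_gem_prime_import,

		/* ... remaining callbacks elided ... */
	};

With such wiring in place, userspace turns a GEM handle into a shareable file descriptor via DRM_IOCTL_PRIME_HANDLE_TO_FD and converts a received descriptor back into a handle via DRM_IOCTL_PRIME_FD_TO_HANDLE, which is what ultimately invokes i915_gem_prime_export and i915_gem_prime_import.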