/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>
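
/*
 * Per-attachment state: a private copy of the exporter's scatter-gather
 * table plus the direction it was last mapped with, so that repeated map
 * requests in the same direction can reuse the cached mapping.
 */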
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
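
/* Allocate per-attachment bookkeeping; nothing is mapped yet, hence DMA_NONE. */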
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}
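
/* Undo attach: unmap the cached sg table if it was mapped, then free the state. */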
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}
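
/*
 * Hand the importer a device mapping of the buffer.  The exporter's sg
 * table is cloned so that each attachment maps its own copy; the result
 * is cached in the attachment and reused until detach.
 */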
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}
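
	/*
	 * Copy the exporter's scatterlist entries into this attachment's
	 * private table under the device lock.
	 */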
	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}
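
	/* Map the copied table for DMA unless no transfer direction was given. */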
	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do: the cached mapping is torn down at detach time. */
}

static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * exynos_dmabuf_release() being called means the file object's
	 * f_count has reached zero; drop the references that were taken
	 * at drm_prime_handle_to_fd() time.
	 */
	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
		exynos_gem_obj->base.export_dma_buf = NULL;

		/*
		 * drop this gem object's refcount to release the allocated
		 * buffer and resources.
		 */
		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
	}
}
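
/* CPU access (kmap/kunmap) and mmap of exported buffers are not supported yet. */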
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
					struct vm_area_struct *vma)
{
	return -ENOTTY;
}

static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= exynos_dmabuf_release,
};
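
/* Wrap a gem object in a dma-buf so that other devices can import it via PRIME. */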
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags);
}
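
/*
 * Import a dma-buf as an exynos gem object.  A buffer that this driver
 * exported from the same device is short-circuited back to its gem
 * object; anything else is attached and mapped through the dma-buf API.
 */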
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the dmabuf's f_count.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		/* map_attachment may return NULL as well as an ERR_PTR. */
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* a single-entry sgt always means physically contiguous memory. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for
		 * now assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of its buffer type.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");