exynos_drm_dmabuf.c

/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>
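
/*
 * Per-attachment bookkeeping: a private copy of the exporter's scatter/gather
 * table plus the DMA direction it was last mapped with, so a repeated
 * map_dma_buf request in the same direction can reuse the existing mapping.
 */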
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
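
/* attach callback: allocate the per-attachment state and start out unmapped. */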
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}
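
/*
 * detach callback: undo any DMA mapping left over from map_dma_buf and free
 * the per-attachment state.
 */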
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}
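
/*
 * map_dma_buf callback: clone the GEM buffer's scatter/gather table into the
 * attachment, map it for the importer's device, and cache the result so a
 * second request in the same direction returns the already-mapped table.
 */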
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
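
/*
 * unmap_dma_buf callback: intentionally empty; the mapping is kept for the
 * lifetime of the attachment and torn down in the detach callback.
 */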
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	/*
	 * exynos_dmabuf_release() being called means that the file object's
	 * f_count has reached zero, so drop the references that were taken
	 * on the GEM object at drm_prime_handle_to_fd() time.
	 */
	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
		exynos_gem_obj->base.export_dma_buf = NULL;

		/*
		 * drop this gem object refcount to release allocated buffer
		 * and resources.
		 */
		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
	}
}
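
/*
 * CPU access helpers (kmap/kunmap and their atomic variants) are not
 * implemented yet; they are stubbed out so the dma_buf_ops table below can
 * be filled in completely.
 */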
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
					struct vm_area_struct *vma)
{
	return -ENOTTY;
}
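
/*
 * dma_buf_ops used for every buffer exported by this driver; mmap is not
 * supported and returns -ENOTTY.
 */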
static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach = exynos_gem_attach_dma_buf,
	.detach = exynos_gem_detach_dma_buf,
	.map_dma_buf = exynos_gem_map_dma_buf,
	.unmap_dma_buf = exynos_gem_unmap_dma_buf,
	.kmap = exynos_gem_dmabuf_kmap,
	.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
	.kunmap = exynos_gem_dmabuf_kunmap,
	.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
	.mmap = exynos_gem_dmabuf_mmap,
	.release = exynos_dmabuf_release,
};
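
/* PRIME export hook: wrap the GEM object in a dma-buf using the ops above. */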
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags);
}
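
/*
 * PRIME import hook: if the dma-buf was exported by this driver on the same
 * device, just take another reference on the underlying GEM object;
 * otherwise attach to the foreign buffer, map it, and wrap the resulting
 * scatter/gather table in a new GEM object.
 */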
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");
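
/*
 * Usage sketch (not part of this file): these PRIME hooks are typically wired
 * into the driver's struct drm_driver roughly as shown below, alongside the
 * generic PRIME handle/fd helpers. The field names follow the drm_driver
 * layout of the same kernel era; treat this as an assumption rather than a
 * quote of exynos_drm_drv.c.
 *
 *	static struct drm_driver exynos_drm_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME | ...,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= exynos_dmabuf_prime_export,
 *		.gem_prime_import	= exynos_dmabuf_prime_import,
 *		...
 *	};
 */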