dma-buf.h

/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *	    given buffer. It might return -EBUSY to signal that backing storage
 *	    is already allocated and incompatible with the requirements
 *	    of the requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns list of scatter pages allocated, increases usecount
 *		 of the buffer. Requires at least one attach to be called
 *		 before. Returned sg list should already be mapped into
 *		 _device_ address space. This call may sleep. May also return
 *		 -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
 *		   pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *		      caches and allocate backing storage (if not yet done),
 *		      respectively pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address space;
 *		 users may not block until the subsequent unmap call.
 *		 This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		   This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 *	  mapping needs to be coherent - if the exporter doesn't directly
 *	  support this, it needs to fake coherency by shooting down any ptes
 *	  when transitioning away from the cpu domain.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer.
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
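/*
 * Illustrative sketch (not part of the original header): one way a
 * hypothetical exporter might back the core callbacks with a pre-built
 * sg_table. struct my_buffer, my_buffer_free() and the omitted callbacks
 * (kmap, kmap_atomic, mmap, ...) are assumptions for illustration only.
 */
#if 0	/* example only -- not compiled */
struct my_buffer {
	struct sg_table sgt;	/* pages backing the buffer */
	size_t size;
};

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct my_buffer *buf = attach->dmabuf->priv;

	/* map the exporter's pages into the attached device's address space */
	if (!dma_map_sg(attach->dev, buf->sgt.sgl, buf->sgt.nents, dir))
		return ERR_PTR(-ENOMEM);
	return &buf->sgt;
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
}

static void my_release(struct dma_buf *dmabuf)
{
	/* called after the last dma_buf_put(); free the backing storage */
	my_buffer_free(dmabuf->priv);
}

static const struct dma_buf_ops my_dma_buf_ops = {
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
	/* kmap, kmap_atomic, mmap, ... omitted for brevity */
};
#endif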
/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *	  refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @priv: exporter specific private data for this buffer object.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation and attach/detach */
	struct mutex lock;
	void *priv;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
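/*
 * Illustrative sketch (not part of the original header): an exporter that
 * caches the dma_buf pointer so repeated exports of the same object hand
 * out the same buffer. struct my_object, its dma_buf/size fields and
 * my_dma_buf_ops are assumptions for illustration; error unwinding is
 * abbreviated.
 */
#if 0	/* example only -- not compiled */
static int my_export_fd(struct my_object *obj, int flags)
{
	if (!obj->dma_buf) {
		obj->dma_buf = dma_buf_export(obj, &my_dma_buf_ops,
					      obj->size, flags);
		if (IS_ERR(obj->dma_buf)) {
			int ret = PTR_ERR(obj->dma_buf);

			obj->dma_buf = NULL;
			return ret;
		}
	}

	/*
	 * Take a reference that the new fd will own; the cached
	 * obj->dma_buf pointer keeps its own reference, dropped when
	 * the object is destroyed.
	 */
	get_dma_buf(obj->dma_buf);
	return dma_buf_fd(obj->dma_buf, flags);
}
#endif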
#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					   struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
			       size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
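/*
 * Illustrative sketch (not part of the original header): CPU access to the
 * first page of a buffer, bracketed by the begin/end_cpu_access calls and
 * using the per-page kmap interface. The page index, length and direction
 * are assumptions for illustration.
 */
#if 0	/* example only -- not compiled */
static int my_cpu_read_first_page(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);	/* map page 0 */
	if (vaddr) {
		/* ... read the data at vaddr ... */
		dma_buf_kunmap(dmabuf, 0, vaddr);
	}

	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	return vaddr ? 0 : -ENOMEM;
}
#endif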
#else

static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
							struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *dmabuf_attach)
{
}

static inline struct dma_buf *dma_buf_export(void *priv,
					     const struct dma_buf_ops *ops,
					     size_t size, int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	return -ENODEV;
}

static inline struct dma_buf *dma_buf_get(int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_put(struct dma_buf *dmabuf)
{
}

static inline struct sg_table *dma_buf_map_attachment(
	struct dma_buf_attachment *attach, enum dma_data_direction write)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
					    struct sg_table *sg,
					    enum dma_data_direction dir)
{
}

static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					   size_t start, size_t len,
					   enum dma_data_direction dir)
{
	return -ENODEV;
}

static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  size_t start, size_t len,
					  enum dma_data_direction dir)
{
}

static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
					unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum, void *vaddr)
{
}

static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
				  unsigned long pnum, void *vaddr)
{
}

static inline int dma_buf_mmap(struct dma_buf *dmabuf,
			       struct vm_area_struct *vma,
			       unsigned long pgoff)
{
	return -ENODEV;
}

static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	return NULL;
}

static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
}

#endif /* CONFIG_DMA_SHARED_BUFFER */
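/*
 * Illustrative sketch (not part of the original header): the typical importer
 * sequence built from the interfaces declared above. The dev and fd arguments
 * are assumed to come from the caller; the device programming step is elided.
 */
#if 0	/* example only -- not compiled */
static int my_import_buffer(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);	/* register the device */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto out_detach;
	}

	/* ... program the device with the returned sg list ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return ret;
}
#endif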
#endif /* __DMA_BUF_H__ */