dma-buf.h

/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright (C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially to
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;
/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *          given buffer. It might return -EBUSY to signal that backing storage
 *          is already allocated and incompatible with the requirements
 *          of the requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns the list of scatter pages allocated and increases the
 *          usecount of the buffer. Requires at least one attach to have been
 *          called before. The returned sg list should already be mapped into
 *          _device_ address space. This call may sleep. May also return
 *          -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases the usecount of the buffer and might deallocate
 *          the scatter pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *          caches, allocate backing storage (if not yet done) and pin the
 *          object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address space; the
 *          caller must not block between this call and the subsequent unmap.
 *          This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *          This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);
};
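
/*
 * A minimal exporter-side sketch (illustrative only, not part of this
 * interface): one plausible way to back @map_dma_buf and @unmap_dma_buf
 * with a simple page array. The names my_buffer, my_map_dma_buf,
 * my_unmap_dma_buf and my_release are hypothetical, error handling is
 * trimmed, and a real exporter would also wire up the attach/detach and
 * cpu-access hooks as needed. Note that map_dma_buf hands back the sg list
 * already mapped into the attached device's address space.
 *
 *	struct my_buffer {
 *		struct page **pages;
 *		unsigned int nr_pages;
 *	};
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct scatterlist *sg;
 *		struct sg_table *sgt;
 *		unsigned int i;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt || sg_alloc_table(sgt, buf->nr_pages, GFP_KERNEL))
 *			return ERR_PTR(-ENOMEM);
 *
 *		for_each_sg(sgt->sgl, sg, buf->nr_pages, i)
 *			sg_set_page(sg, buf->pages[i], PAGE_SIZE, 0);
 *
 *		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir))
 *			return ERR_PTR(-ENOMEM);
 *
 *		return sgt;
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */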
/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: mutex to serialize list manipulation and attach/detach.
 * @priv: exporter-specific private data for this buffer object.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation and attach/detach */
	struct mutex lock;
	void *priv;
};
/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list node in the dma_buf's list of attachments.
 * @priv: exporter-specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf. Needed by drivers that
 * want to create additional references to the dmabuf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf pointer around so
 * that subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
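
/*
 * A small usage sketch (illustrative only): an exporter that caches its
 * dma_buf pointer across exports, under the assumption that each file
 * descriptor handed to userspace carries one reference which close()
 * eventually drops. dma_buf_export() creates the buffer with its initial
 * reference; get_dma_buf() takes the extra reference that goes out with
 * the fd, so the cached pointer stays valid until the exporter drops its
 * own reference with dma_buf_put() at teardown. my_export, my_buffer and
 * my_dma_buf_ops are hypothetical names.
 *
 *	static int my_export(struct my_buffer *buf, size_t size, int flags)
 *	{
 *		if (!buf->dmabuf) {
 *			buf->dmabuf = dma_buf_export(buf, &my_dma_buf_ops,
 *						     size, flags);
 *			if (IS_ERR(buf->dmabuf))
 *				return PTR_ERR(buf->dmabuf);
 *		}
 *
 *		get_dma_buf(buf->dmabuf);
 *		return dma_buf_fd(buf->dmabuf, flags);
 *	}
 */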
#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					   struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
			       size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
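
/*
 * An importer-side sketch (illustrative only) of the calling sequence the
 * declarations above imply: resolve the fd, attach the device, map, use,
 * then tear down in reverse order. my_import is a hypothetical name, "dev"
 * is the importing device, and error handling for attach/map is trimmed.
 *
 *	static int my_import(struct device *dev, int fd)
 *	{
 *		struct dma_buf_attachment *attach;
 *		struct dma_buf *dmabuf;
 *		struct sg_table *sgt;
 *
 *		dmabuf = dma_buf_get(fd);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, dev);
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *		... program the device using the addresses in sgt ...
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return 0;
 *	}
 */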
#else

static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
							 struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *dmabuf_attach)
{
}

static inline struct dma_buf *dma_buf_export(void *priv,
					     const struct dma_buf_ops *ops,
					     size_t size, int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	return -ENODEV;
}

static inline struct dma_buf *dma_buf_get(int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_put(struct dma_buf *dmabuf)
{
}

static inline struct sg_table *dma_buf_map_attachment(
	struct dma_buf_attachment *attach, enum dma_data_direction write)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
					     struct sg_table *sg,
					     enum dma_data_direction dir)
{
}

static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					   size_t start, size_t len,
					   enum dma_data_direction dir)
{
	return -ENODEV;
}

static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  size_t start, size_t len,
					  enum dma_data_direction dir)
{
}

static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum, void *vaddr)
{
}

static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
				  unsigned long pnum, void *vaddr)
{
}

#endif /* CONFIG_DMA_SHARED_BUFFER */

#endif /* __DMA_BUF_H__ */