/* async_tx.h */
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
/**
 * struct dma_chan_ref - object used to manage dma channels received from the
 * dmaengine core.
 * @chan: the channel being tracked
 * @node: node for the channel to be placed on async_tx_master_list
 * @rcu: for list_del_rcu
 * @count: number of times this channel is listed in the pool
 *	(for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};
/**
 * enum async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 * destination address is not a source.  The asynchronous case handles this
 * implicitly, the synchronous case needs to zero the destination block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
 * also one of the source addresses.  In the synchronous case the destination
 * address is an implied source, whereas the asynchronous case it must be listed
 * as a source.  The destination address must be the first address in the source
 * array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 * dependency chain
 * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST = (1 << 0),
	ASYNC_TX_XOR_DROP_DST = (1 << 1),
	/* NOTE(review): bit 2 is unassigned here — presumably a historical
	 * gap left by a removed flag; confirm before reusing it. */
	ASYNC_TX_ACK = (1 << 3),
	ASYNC_TX_DEP_ACK = (1 << 4),
};
  58. #ifdef CONFIG_DMA_ENGINE
  59. void async_tx_issue_pending_all(void);
  60. enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
  61. void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
  62. struct dma_chan *
  63. async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  64. enum dma_transaction_type tx_type);
  65. #else
  66. static inline void async_tx_issue_pending_all(void)
  67. {
  68. do { } while (0);
  69. }
  70. static inline enum dma_status
  71. dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  72. {
  73. return DMA_SUCCESS;
  74. }
  75. static inline void
  76. async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
  77. struct dma_chan *host_chan)
  78. {
  79. do { } while (0);
  80. }
  81. static inline struct dma_chan *
  82. async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  83. enum dma_transaction_type tx_type)
  84. {
  85. return NULL;
  86. }
  87. #endif
  88. /**
  89. * async_tx_sync_epilog - actions to take if an operation is run synchronously
  90. * @flags: async_tx flags
  91. * @depend_tx: transaction depends on depend_tx
  92. * @cb_fn: function to call when the transaction completes
  93. * @cb_fn_param: parameter to pass to the callback routine
  94. */
  95. static inline void
  96. async_tx_sync_epilog(unsigned long flags,
  97. struct dma_async_tx_descriptor *depend_tx,
  98. dma_async_tx_callback cb_fn, void *cb_fn_param)
  99. {
  100. if (cb_fn)
  101. cb_fn(cb_fn_param);
  102. if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
  103. async_tx_ack(depend_tx);
  104. }
/* Submit @tx on @chan, chaining it after @depend_tx per @flags. */
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);

/* xor @src_cnt source pages into @dest at @offset over @len bytes. */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);

/* xor-validate the sources against @dest; presumably *@result is set
 * non-zero on mismatch — confirm against the implementation. */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);

/* Copy @len bytes from @src+@src_offset to @dest+@dest_offset. */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	unsigned int src_offset, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);

/* Fill @len bytes of @dest at @offset with @val. */
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
	size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);

/* Issue @cb_fn once @depend_tx completes (no data operation). */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_fn_param);
#endif /* _ASYNC_TX_H_ */