dma_v2.h

/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H

#include <linux/dmaengine.h>
#include "dma.h"
#include "hw.h"

extern int ioat_pending_level;
extern int ioat_ring_alloc_order;

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
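
/*
 * Both macros clamp a tunable ring order to IOAT_MAX_ORDER, so a ring can
 * never exceed 1 << 16 == 65536 descriptors.  These are macros rather than
 * functions, so ioat_ring_max_alloc_order only needs to be visible at the
 * expansion site (presumably dma_v2.c, alongside ioat_ring_alloc_order).
 */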

/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @pending: lock free indicator for issued != head
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @ring: software ring buffer implementation of hardware ring
 * @ring_lock: protects ring attributes
 */
struct ioat2_dma_chan {
	struct ioat_chan_common base;
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	int pending;
	struct ioat_ring_ent **ring;
	spinlock_t ring_lock;
};
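
/*
 * The three ring indices only ever advance and are compared modulo the
 * ring size, so at all times tail <= issued <= head in modular arithmetic:
 * slots between tail and issued are in flight with hardware, and slots
 * between issued and head are allocated but not yet submitted.
 */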

static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	return container_of(chan, struct ioat2_dma_chan, base);
}

static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
{
	return (1 << ioat->alloc_order) - 1;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
}

static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
	u16 num_descs = ioat2_ring_mask(ioat) + 1;
	u16 active = ioat2_ring_active(ioat);

	BUG_ON(active > num_descs);
	return num_descs - active;
}
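
/*
 * Worked example: with alloc_order = 2 (a 4-entry ring), head = 5,
 * issued = 4 and tail = 3 give active = (5 - 3) & 3 = 2, pending =
 * (5 - 4) & 3 = 1 and space = 4 - 2 = 2.  The power-of-two ring size is
 * what lets these helpers replace a modulo with a mask.
 */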

/* assumes caller already checked space */
static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
{
	ioat->head += len;
	return ioat->head - len;
}
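
/*
 * Note that head is a free-running u16: it is allowed to wrap, since
 * consumers always reduce indices with ioat2_ring_mask() before use.
 * The return value is the first of the 'len' slots just reserved.
 */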

static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
{
	u16 num_descs = len >> ioat->xfercap_log;

	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
	return num_descs;
}
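
/*
 * This is a shift-based ceil(len / xfercap): the !! term adds one more
 * descriptor whenever len is not an exact multiple of the transfer cap.
 * E.g. with xfercap_log = 20 (1 MB per descriptor), a 2.5 MB copy needs
 * two full descriptors plus one for the remainder, so three in total.
 */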

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @fill: hardware fill descriptor
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_fill_descriptor *fill;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
};

static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
	return ioat->ring[idx & ioat2_ring_mask(ioat)];
}
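
/*
 * Callers may pass free-running indices (e.g. a raw 'head' value); the
 * mask folds them back into the ring, so no bounds check is needed.
 */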

static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
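
/*
 * The 64-bit chain address register is programmed as two 32-bit MMIO
 * writes (low half, then high half), which avoids relying on a 64-bit
 * writeq() that not all platforms provide.
 */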

int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
				  dma_cookie_t *done, dma_cookie_t *used);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
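
/*
 * A minimal sketch of how a dmaengine client reaches the entry points
 * above, assuming ioat2_dma_prep_memcpy_lock and ioat2_issue_pending are
 * wired up as the channel's device_prep_dma_memcpy and
 * device_issue_pending callbacks:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...then poll completion with dma_async_is_tx_complete().
 */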
#endif /* IOATDMA_V2_H */