/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
  21. #ifndef IOATDMA_V2_H
  22. #define IOATDMA_V2_H
  23. #include <linux/dmaengine.h>
  24. #include <linux/circ_buf.h>
  25. #include "dma.h"
  26. #include "hw.h"
  27. extern int ioat_pending_level;
  28. extern int ioat_ring_alloc_order;
  29. /*
  30. * workaround for IOAT ver.3.0 null descriptor issue
  31. * (channel returns error when size is 0)
  32. */
  33. #define NULL_DESC_BUFFER_SIZE 1
  34. #define IOAT_MAX_ORDER 16
  35. #define ioat_get_alloc_order() \
  36. (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
  37. #define ioat_get_max_alloc_order() \
  38. (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @ring: software ring buffer implementation of hardware ring
 * @ring_lock: protects ring attributes
 */
struct ioat2_dma_chan {
	struct ioat_chan_common base;
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	struct ioat_ring_ent **ring;
	spinlock_t ring_lock;
};
  61. static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
  62. {
  63. struct ioat_chan_common *chan = to_chan_common(c);
  64. return container_of(chan, struct ioat2_dma_chan, base);
  65. }
  66. static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
  67. {
  68. return 1 << ioat->alloc_order;
  69. }
  70. /* count of descriptors in flight with the engine */
  71. static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
  72. {
  73. return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
  74. }
  75. /* count of descriptors pending submission to hardware */
  76. static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
  77. {
  78. return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
  79. }
  80. static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
  81. {
  82. return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
  83. }
  84. /* assumes caller already checked space */
  85. static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
  86. {
  87. ioat->head += len;
  88. return ioat->head - len;
  89. }
  90. static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
  91. {
  92. u16 num_descs = len >> ioat->xfercap_log;
  93. num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
  94. return num_descs;
  95. }
/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @fill: hardware fill descriptor
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 */
struct ioat_ring_ent {
	/* one hardware-descriptor pointer, viewed through the type
	 * matching the operation this ring entry describes
	 */
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_fill_descriptor *fill;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
};
  129. static inline struct ioat_ring_ent *
  130. ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
  131. {
  132. return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
  133. }
/*
 * Program the 64-bit descriptor chain address into the channel's MMIO
 * registers as two 32-bit writes: low dword, then high dword.
 * NOTE(review): the low-before-high write order is preserved from the
 * original — presumably the hardware latches on one of the writes;
 * confirm against the IOAT register spec before reordering.
 */
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
/* device probe / DCA setup (v3 reuses the v2 ring implementation) */
int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);

/* ring allocation and channel enumeration */
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);

/* dmaengine entry points */
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);

/* deferred-work callbacks (take an 'unsigned long data' cookie) and
 * channel quiesce/reset helpers
 */
void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);

extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */