/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H

#include <linux/dmaengine.h>
#include "dma.h"
#include "hw.h"

extern int ioat_pending_level;
extern int ioat_ring_alloc_order;

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
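
/*
 * For illustration (an assumption about the companion .c file, not
 * stated here): ioat_ring_alloc_order is a tunable declared extern
 * above, and the min() clamp caps any requested order at
 * IOAT_MAX_ORDER, i.e. at most 2^16 = 65536 ring entries.  Note that
 * ioat_ring_max_alloc_order has no extern declaration in this header,
 * so ioat_get_max_alloc_order() can only be expanded in a translation
 * unit where that symbol is itself visible.
 */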

/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @ring: software ring buffer implementation of hardware ring
 * @ring_lock: protects ring attributes
 */
struct ioat2_dma_chan {
	struct ioat_chan_common base;
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	struct ioat_ring_ent **ring;
	spinlock_t ring_lock;
};
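
/*
 * Illustrative note (not from the original file): the three indices
 * only ever increase and are compared modulo the ring size, so the
 * ring divides into three regions: [tail, issued) is submitted to
 * hardware, [issued, head) is prepped but not yet submitted, and the
 * remaining (1 << alloc_order) - (head - tail) entries are free.
 */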

static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
{
	struct ioat_chan_common *chan = to_chan_common(c);

	return container_of(chan, struct ioat2_dma_chan, base);
}

static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
{
	return (1 << ioat->alloc_order) - 1;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
	return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
}

static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
	u16 num_descs = ioat2_ring_mask(ioat) + 1;
	u16 active = ioat2_ring_active(ioat);

	BUG_ON(active > num_descs);
	return num_descs - active;
}
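
/*
 * Worked example (illustrative): with alloc_order = 4 the mask is 0xf
 * and the ring holds 16 entries.  If head has wrapped to 0x0002 while
 * tail is still 0xfffe, (0x0002 - 0xfffe) & 0xf = 4 descriptors are
 * active, so ioat2_ring_space() reports 16 - 4 = 12 free slots; the
 * unsigned u16 subtraction makes the wraparound harmless.
 */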

/* assumes caller already checked space */
static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
{
	ioat->head += len;
	return ioat->head - len;
}
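
/*
 * Usage sketch (hypothetical caller, assuming space was already
 * reserved, e.g. via ioat2_alloc_and_lock()): the return value is the
 * index of the first descriptor in the claimed range.
 *
 *	idx = ioat2_desc_alloc(ioat, num_descs);
 *	for (i = 0; i < num_descs; i++)
 *		ent = ioat2_get_ring_ent(ioat, idx + i);
 */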

static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
{
	u16 num_descs = len >> ioat->xfercap_log;

	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
	return num_descs;
}
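
/*
 * Worked example (illustrative): with xfercap_log = 20 (a 1 MB cap per
 * descriptor), a 2.5 MB transfer yields len >> 20 = 2 plus one more
 * descriptor for the 0.5 MB remainder flagged by the !! term, i.e. 3
 * descriptors; a length that is an exact multiple of the cap adds
 * nothing.
 */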

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @fill: hardware fill descriptor
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_fill_descriptor *fill;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
};
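
/*
 * Illustrative sketch (hypothetical snippet; descriptor field names
 * are assumptions based on hw.h, not shown here): every union member
 * aliases the same descriptor memory, so a prep routine writes through
 * the view matching the operation, e.g. for a memcpy:
 *
 *	ent = ioat2_get_ring_ent(ioat, idx);
 *	ent->hw->size = copy_len;
 *	ent->hw->src_addr = src;
 *	ent->hw->dst_addr = dst;
 */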

static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
	return ioat->ring[idx & ioat2_ring_mask(ioat)];
}

static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
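
/*
 * Worked example (illustrative): writel() performs a 32-bit MMIO
 * write, so the 64-bit chain address is split across two registers.
 * For addr = 0x0000000123456780, the low write stores 0x23456780 and
 * the high write stores 0x00000001.
 */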

int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
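
/*
 * Usage sketch (hypothetical, simplified): these entry points follow
 * the standard dmaengine prep/submit/issue pattern, roughly
 *
 *	tx = ioat2_dma_prep_memcpy_lock(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);
 *	ioat2_issue_pending(chan);
 */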

#endif /* IOATDMA_V2_H */