  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #ifndef _CE_H_
  18. #define _CE_H_
  19. #include "hif.h"
/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8

/* Number of source-ring entries for the HTT host->target message CE */
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN	8

/* Upper bound on the number of fragments in one ce_sendlist */
#define CE_SENDLIST_ITEMS_MAX	12

/* Internal send flag: this buffer is part of a multi-fragment gather */
#define CE_SEND_FLAG_GATHER	0x00010000
/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

/* Opaque per-engine state; full definition below */
struct ce_state;

/* Copy Engine operational state */
enum ce_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
};
  39. #define CE_DESC_FLAGS_GATHER (1 << 0)
  40. #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
  41. #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
  42. #define CE_DESC_FLAGS_META_DATA_LSB 3
/*
 * Hardware copy-engine descriptor. Layout and endianness are fixed by
 * the target hardware; all fields are little-endian on the wire.
 */
struct ce_desc {
	__le32 addr;	/* CE-space address of the buffer */
	__le16 nbytes;	/* buffer length in bytes */
	__le16 flags;	/* %CE_DESC_FLAGS_ */
};
/* Copy Engine Ring internal state */
struct ce_ring_state {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* nentries - 1; used for cheap modulo ring arithmetic */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;

	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;
	/* CE address space */
	u32 base_addr_ce_space;

	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* One caller-supplied context per ring entry, handed back on completion */
	void **per_transfer_context;
};
/* Copy Engine internal state */
struct ce_state {
	struct ath10k *ar;		/* owning device instance */
	unsigned int id;		/* engine number */
	unsigned int attr_flags;	/* CE_ATTR_* values */
	/* control register address for this CE — presumably derived from
	 * ath10k_ce_base_address(); confirm against ce.c */
	u32 ctrl_addr;
	enum ce_op_state state;

	/* Send-completion callback (see ath10k_ce_send_cb_register) */
	void (*send_cb) (struct ce_state *ce_state,
			 void *per_transfer_send_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id);

	/* Receive-completion callback (see ath10k_ce_recv_cb_register) */
	void (*recv_cb) (struct ce_state *ce_state,
			 void *per_transfer_recv_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id,
			 unsigned int flags);

	/* Max send size; also the minimum size of a receive buffer */
	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
};
/* One fragment of a gather transfer (see ath10k_ce_sendlist_send) */
struct ce_sendlist_item {
	/* e.g. buffer or desc list */
	dma_addr_t data;
	union {
		/* simple buffer */
		unsigned int nbytes;
		/* Rx descriptor list */
		unsigned int ndesc;
	} u;
	/* externally-specified flags; OR-ed with internal flags */
	u32 flags;
};
/* A list of up to CE_SENDLIST_ITEMS_MAX fragments for one gather send */
struct ce_sendlist {
	unsigned int num_items;
	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   ce          - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ath10k_ce_send(struct ce_state *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);

/* Register a send-completion callback for this CE */
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id),
				int disable_interrupts);

/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
				u32 buffer,
				unsigned int nbytes,
				/* OR-ed with internal flags */
				u32 flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 *   ce          - which copy engine to use
 *   sendlist    - list of simple buffers to send using gather
 *   transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    /* 14 bits */
			    unsigned int transfer_id);
/*==================Recv=======================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 *   ce                        - which copy engine to use
 *   per_transfer_recv_context - context passed back to caller's recv_cb
 *   buffer                    - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
			       void *per_transfer_recv_context,
			       u32 buffer);

/* Register a receive-completion callback for this CE */
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id,
						 unsigned int flags));

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);

/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp);
/*==================CE Engine Initialization=======================*/

/* Initialize an instance of a CE; returns the new engine state */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr);

/*==================CE Engine Shutdown=======================*/

/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

/* Free the resources allocated by ath10k_ce_init() */
void ath10k_ce_deinit(struct ce_state *ce_state);

/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
void ath10k_ce_disable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP		1
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA		2
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	4
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR		8
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* currently not in use */
	unsigned int priority;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	/* Future use */
	void *reserved;
};
/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except the last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
 * each fragment done with send and the transfer context would be
 * CE_SENDLIST_ITEM_CTXT. The upper layer can use this sentinel to tell
 * intermediate-fragment completions from the final one.
 */
#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef)
/*
 * Per-CE register offsets and bit-field definitions.
 *
 * The *_MSB/*_LSB/*_MASK triples describe bit fields within the named
 * register; the matching _GET()/_SET() macros extract or insert a field
 * value. NOTE(review): offsets appear to be relative to the per-engine
 * base returned by ath10k_ce_base_address() — confirm against ce.c.
 */

/* Source/destination ring base-address and size registers */
#define SR_BA_ADDRESS				0x0000
#define SR_SIZE_ADDRESS				0x0004
#define DR_BA_ADDRESS				0x0008
#define DR_SIZE_ADDRESS				0x000c
#define CE_CMD_ADDRESS				0x0018

/* CTRL1: destination-ring byte-swap enable (bit 17) */
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)

/* CTRL1: source-ring byte-swap enable (bit 16) */
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)

/* CTRL1: maximum DMA transfer length (bits 0..15) */
#define CE_CTRL1_DMAX_LENGTH_MSB		15
#define CE_CTRL1_DMAX_LENGTH_LSB		0
#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)

#define CE_CTRL1_ADDRESS			0x0010
#define CE_CTRL1_HW_MASK			0x0007ffff
#define CE_CTRL1_SW_MASK			0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK			0x00000000
#define CE_CTRL1_SW_WRITE_MASK			0x0007ffff
#define CE_CTRL1_RSTMASK			0xffffffff
#define CE_CTRL1_RESET				0x00000080

/* CMD register: halt-status field (bit 3) and halt request (bit 0) */
#define CE_CMD_HALT_STATUS_MSB			3
#define CE_CMD_HALT_STATUS_LSB			3
#define CE_CMD_HALT_STATUS_MASK			0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET		0
#define CE_CMD_HALT_MSB				0
#define CE_CMD_HALT_MASK			0x00000001

/* Host interrupt-enable register: copy-complete bit */
#define HOST_IE_COPY_COMPLETE_MSB		0
#define HOST_IE_COPY_COMPLETE_LSB		0
#define HOST_IE_COPY_COMPLETE_MASK		0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET		0
#define HOST_IE_ADDRESS				0x002c

/* Host interrupt-status register bits */
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
#define HOST_IS_ADDRESS				0x0030

/* Misc interrupt enable/status registers and error bits */
#define MISC_IE_ADDRESS				0x0034
#define MISC_IS_AXI_ERR_MASK			0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK		0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK		0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK		0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK		0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK		0x00000020
#define MISC_IS_ADDRESS				0x0038

/* Ring write-index registers and current read-index registers */
#define SR_WR_INDEX_ADDRESS			0x003c
#define DST_WR_INDEX_ADDRESS			0x0040
#define CURRENT_SRRI_ADDRESS			0x0044
#define CURRENT_DRRI_ADDRESS			0x0048

/* Source-ring watermark thresholds: low in bits 16..31, high in bits 0..15 */
#define SRC_WATERMARK_LOW_MSB			31
#define SRC_WATERMARK_LOW_LSB			16
#define SRC_WATERMARK_LOW_MASK			0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET			0
#define SRC_WATERMARK_HIGH_MSB			15
#define SRC_WATERMARK_HIGH_LSB			0
#define SRC_WATERMARK_HIGH_MASK			0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET		0
#define SRC_WATERMARK_ADDRESS			0x004c

/* Destination-ring watermark thresholds, same layout as source */
#define DST_WATERMARK_LOW_LSB			16
#define DST_WATERMARK_LOW_MASK			0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET			0
#define DST_WATERMARK_HIGH_MSB			15
#define DST_WATERMARK_HIGH_LSB			0
#define DST_WATERMARK_HIGH_MASK			0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET		0
#define DST_WATERMARK_ADDRESS			0x0050
  402. static inline u32 ath10k_ce_base_address(unsigned int ce_id)
  403. {
  404. return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
  405. }
/* All four watermark bits in the host interrupt-status register */
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)

/* All error bits in the misc interrupt-status register */
#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
			 MISC_IS_DST_ADDR_ERR_MASK      | \
			 MISC_IS_SRC_LEN_ERR_MASK       | \
			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
			 MISC_IS_DST_RING_OVERFLOW_MASK | \
			 MISC_IS_SRC_RING_OVERFLOW_MASK)

/* Address of the idx'th descriptor in a src/dest ring based at baddr */
#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
/* Number of entries between fromidx and toidx, wrapping around the ring */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
/* Advance idx by one, wrapping around the ring */
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))

/* Per-CE MSI bits (8..15) of the wrapper interrupt-summary register */
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB	8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK	0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS		0x0000

/* Read the wrapper interrupt summary and extract the per-CE MSI bitmap */
#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
  434. #endif /* _CE_H_ */