/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
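
/*
 * A minimal usage sketch of the context plumbing described above, assuming
 * a hypothetical caller (my_recv_done, my_post_recv_buffer and struct my_ctx
 * are illustrative names, not part of this file). The per-transfer context
 * handed to ath10k_ce_recv_buf_enqueue() is the pointer echoed back as
 * transfer_context when the recv callback fires:
 *
 *      static void my_recv_done(struct ce_state *ce_state,
 *                               void *transfer_context, u32 buffer,
 *                               unsigned int nbytes, unsigned int transfer_id,
 *                               unsigned int flags)
 *      {
 *              struct my_ctx *ctx = transfer_context; // same pointer we enqueued
 *              // hand (buffer, nbytes) to the upper layer, then re-post a buffer
 *      }
 *
 *      static int my_post_recv_buffer(struct ce_state *ce_state,
 *                                     struct my_ctx *ctx, u32 paddr)
 *      {
 *              ath10k_ce_recv_cb_register(ce_state, my_recv_done);
 *              return ath10k_ce_recv_buf_enqueue(ce_state, ctx, paddr);
 *      }
 */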

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
                                                       u32 ce_ctrl_addr,
                                                       unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
                                                      u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
                                                      u32 ce_ctrl_addr,
                                                      unsigned int n)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *indicator_addr;

        if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
                ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
                return;
        }

        /* workaround for QCA988x_1.0 HW CE */
        indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;

        if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
                iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
        } else {
                unsigned long irq_flags;
                local_irq_save(irq_flags);
                iowrite32(1, indicator_addr);

                /*
                 * PCIE write waits for ACK in IPQ8K, there is no
                 * need to read back value.
                 */
                (void)ioread32(indicator_addr);
                (void)ioread32(indicator_addr); /* conservative */

                ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);

                iowrite32(0, indicator_addr);
                local_irq_restore(irq_flags);
        }
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32((ar),
                                           (ce_ctrl_addr) + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
                           CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     u32 addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
                                                u32 ce_ctrl_addr,
                                                unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_HIGH_MASK) |
                           SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
                                                  u32 ce_ctrl_addr,
                                                  unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_LOW_MASK) |
                           SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_HIGH_MASK) |
                           DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_LOW_MASK) |
                           DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
                                                        u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
                                                        u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
                                               u32 ce_ctrl_addr)
{
        u32 misc_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + MISC_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
                           misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int mask)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ce_state *ce_state,
                                 void *per_transfer_context,
                                 u32 buffer,
                                 unsigned int nbytes,
                                 unsigned int transfer_id,
                                 unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ce_ring_state *src_ring = ce_state->src_ring;
        struct ce_desc *desc, *sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        u32 ctrl_addr = ce_state->ctrl_addr;
        u32 desc_flags = 0;
        int ret = 0;

        if (nbytes > ce_state->src_sz_max)
                ath10k_warn("%s: sending more than the CE allows (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);

        ath10k_pci_wake(ar);

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) <= 0)) {
                ret = -EIO;
                goto exit;
        }

        desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
                                   write_index);
        sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

        desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

        if (flags & CE_SEND_FLAG_GATHER)
                desc_flags |= CE_DESC_FLAGS_GATHER;
        if (flags & CE_SEND_FLAG_BYTE_SWAP)
                desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

        sdesc->addr = __cpu_to_le32(buffer);
        sdesc->nbytes = __cpu_to_le16(nbytes);
        sdesc->flags = __cpu_to_le16(desc_flags);

        *desc = *sdesc;

        src_ring->per_transfer_context[write_index] = per_transfer_context;

        /* Update Source Ring Write Index */
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

        /* WORKAROUND */
        if (!(flags & CE_SEND_FLAG_GATHER))
                ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

        src_ring->write_index = write_index;
exit:
        ath10k_pci_sleep(ar);
        return ret;
}

int ath10k_ce_send(struct ce_state *ce_state,
                   void *per_transfer_context,
                   u32 buffer,
                   unsigned int nbytes,
                   unsigned int transfer_id,
                   unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                    buffer, nbytes, transfer_id, flags);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
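
/*
 * A minimal sketch of a single-buffer send, assuming a hypothetical caller
 * that already holds a DMA-mapped buffer (my_send_one, skb_ctx and paddr are
 * illustrative names, not part of this file). The transfer_id is echoed back
 * through the send-completion path:
 *
 *      static int my_send_one(struct ce_state *ce_state, void *skb_ctx,
 *                             u32 paddr, unsigned int len,
 *                             unsigned int transfer_id)
 *      {
 *              // -EIO here means the source ring is full; retry after a
 *              // send completion frees an entry.
 *              return ath10k_ce_send(ce_state, skb_ctx, paddr, len,
 *                                    transfer_id, 0);
 *      }
 */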

void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
                                unsigned int nbytes, u32 flags)
{
        unsigned int num_items = sendlist->num_items;
        struct ce_sendlist_item *item;

        item = &sendlist->item[num_items];
        item->data = buffer;
        item->u.nbytes = nbytes;
        item->flags = flags;
        sendlist->num_items++;
}

int ath10k_ce_sendlist_send(struct ce_state *ce_state,
                            void *per_transfer_context,
                            struct ce_sendlist *sendlist,
                            unsigned int transfer_id)
{
        struct ce_ring_state *src_ring = ce_state->src_ring;
        struct ce_sendlist_item *item;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int num_items = sendlist->num_items;
        unsigned int sw_index;
        unsigned int write_index;
        int i, delta, ret = -ENOMEM;

        spin_lock_bh(&ar_pci->ce_lock);

        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

        if (delta >= num_items) {
                /*
                 * Handle all but the last item uniformly.
                 */
                for (i = 0; i < num_items - 1; i++) {
                        item = &sendlist->item[i];
                        ret = ath10k_ce_send_nolock(ce_state,
                                                    CE_SENDLIST_ITEM_CTXT,
                                                    (u32) item->data,
                                                    item->u.nbytes, transfer_id,
                                                    item->flags |
                                                    CE_SEND_FLAG_GATHER);
                        if (ret)
                                ath10k_warn("CE send failed for item: %d\n", i);
                }
                /*
                 * Provide valid context pointer for final item.
                 */
                item = &sendlist->item[i];
                ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                            (u32) item->data, item->u.nbytes,
                                            transfer_id, item->flags);
                if (ret)
                        ath10k_warn("CE send failed for last item: %d\n", i);
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
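
/*
 * A minimal sketch of a gathered send, assuming a hypothetical caller with
 * two DMA-mapped fragments (my_send_two_frags, hdr_paddr, payload_paddr and
 * msg_ctx are illustrative names). All items in the list are coalesced by
 * hardware into a single destination buffer and complete with a single
 * interrupt; only the final item carries the caller's per-transfer context:
 *
 *      static int my_send_two_frags(struct ce_state *ce_state, void *msg_ctx,
 *                                   u32 hdr_paddr, unsigned int hdr_len,
 *                                   u32 payload_paddr, unsigned int payload_len,
 *                                   unsigned int transfer_id)
 *      {
 *              struct ce_sendlist sendlist = {};
 *
 *              ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
 *              ath10k_ce_sendlist_buf_add(&sendlist, payload_paddr,
 *                                         payload_len, 0);
 *              return ath10k_ce_sendlist_send(ce_state, msg_ctx, &sendlist,
 *                                             transfer_id);
 *      }
 */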

int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
                               void *per_recv_context,
                               u32 buffer)
{
        struct ce_ring_state *dest_ring = ce_state->dest_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        ath10k_pci_wake(ar);

        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

                /* Update destination descriptor */
                desc->addr = __cpu_to_le32(buffer);
                desc->nbytes = 0;

                dest_ring->per_transfer_context[write_index] =
                                                        per_recv_context;

                /* Update Destination Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
                ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
                dest_ring->write_index = write_index;
                ret = 0;
        } else {
                ret = -EIO;
        }
        ath10k_pci_sleep(ar);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp,
                                                unsigned int *flagsp)
{
        struct ce_ring_state *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;
        struct ce_desc *base = dest_ring->base_addr_owner_space;
        struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
        struct ce_desc sdesc;
        u16 nbytes;

        /* Copy in one go for performance reasons */
        sdesc = *desc;

        nbytes = __le16_to_cpu(sdesc.nbytes);
        if (nbytes == 0) {
                /*
                 * This closes a relatively unusual race where the Host
                 * sees the updated DRRI before the update to the
                 * corresponding descriptor has completed. We treat this
                 * as a descriptor that is not yet done.
                 */
                return -EIO;
        }

        desc->nbytes = 0;

        /* Return data from completed destination descriptor */
        *bufferp = __le32_to_cpu(sdesc.addr);
        *nbytesp = nbytes;
        *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

        if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
                *flagsp = CE_RECV_FLAG_SWAPPED;
        else
                *flagsp = 0;

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];

        /* sanity */
        dest_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;

        return 0;
}

int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp,
                                  unsigned int *flagsp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp, flagsp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}
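
/*
 * A minimal sketch of draining completed receive descriptors by polling,
 * assuming a hypothetical caller (my_drain_recv and my_handle_rx are
 * illustrative names). The interrupt-driven path in this file normally does
 * the same thing via the registered recv callback instead:
 *
 *      static void my_drain_recv(struct ce_state *ce_state)
 *      {
 *              void *ctx;
 *              u32 paddr;
 *              unsigned int nbytes, id, flags;
 *
 *              while (ath10k_ce_completed_recv_next(ce_state, &ctx, &paddr,
 *                                                   &nbytes, &id, &flags) == 0)
 *                      my_handle_rx(ctx, paddr, nbytes, flags);
 *      }
 */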

int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp)
{
        struct ce_ring_state *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        dest_ring = ce_state->dest_ring;

        if (!dest_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = dest_ring->nentries_mask;
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        if (write_index != sw_index) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

                /* Return data from completed destination descriptor */
                *bufferp = __le32_to_cpu(desc->addr);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                dest_ring->per_transfer_context[sw_index];

                /* sanity */
                dest_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                dest_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp)
{
        struct ce_ring_state *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int read_index;
        int ret = -EIO;

        if (src_ring->hw_index == sw_index) {
                /*
                 * The SW completion index has caught up with the cached
                 * version of the HW completion index.
                 * Update the cached HW completion index to see whether
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
                ath10k_pci_wake(ar);
                src_ring->hw_index =
                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
                src_ring->hw_index &= nentries_mask;
                ath10k_pci_sleep(ar);
        }
        read_index = src_ring->hw_index;

        if ((read_index != sw_index) && (read_index != 0xffffffff)) {
                struct ce_desc *sbase = src_ring->shadow_base;
                struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = __le32_to_cpu(sdesc->addr);
                *nbytesp = __le16_to_cpu(sdesc->nbytes);
                *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
                                   CE_DESC_FLAGS_META_DATA);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                src_ring->per_transfer_context[sw_index];

                /* sanity */
                src_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                ret = 0;
        }

        return ret;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp)
{
        struct ce_ring_state *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        src_ring = ce_state->src_ring;

        if (!src_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (write_index != sw_index) {
                struct ce_desc *base = src_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = __le32_to_cpu(desc->addr);
                *nbytesp = __le16_to_cpu(desc->nbytes);
                *transfer_idp = MS(__le16_to_cpu(desc->flags),
                                   CE_DESC_FLAGS_META_DATA);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                src_ring->per_transfer_context[sw_index];

                /* sanity */
                src_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

int ath10k_ce_completed_send_next(struct ce_state *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
        u32 ctrl_addr = ce_state->ctrl_addr;
        void *transfer_context;
        u32 buf;
        unsigned int nbytes;
        unsigned int id;
        unsigned int flags;

        ath10k_pci_wake(ar);
        spin_lock_bh(&ar_pci->ce_lock);

        /* Clear the copy-complete interrupts that will be handled here. */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
                                          HOST_IS_COPY_COMPLETE_MASK);

        if (ce_state->recv_cb) {
                /*
                 * Pop completed recv buffers and call the registered
                 * recv callback for each
                 */
                while (ath10k_ce_completed_recv_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf, &nbytes,
                                                            &id, &flags) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->recv_cb(ce_state, transfer_context, buf,
                                          nbytes, id, flags);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        if (ce_state->send_cb) {
                /*
                 * Pop completed send buffers and call the registered
                 * send callback for each
                 */
                while (ath10k_ce_completed_send_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf,
                                                            &nbytes,
                                                            &id) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->send_cb(ce_state, transfer_context,
                                          buf, nbytes, id);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        /*
         * Misc CE interrupts are not being handled, but still need
         * to be cleared.
         */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

        spin_unlock_bh(&ar_pci->ce_lock);
        ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id;
        u32 intr_summary;

        ath10k_pci_wake(ar);
        intr_summary = CE_INTERRUPT_SUMMARY(ar);

        for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
                if (intr_summary & (1 << ce_id))
                        intr_summary &= ~(1 << ce_id);
                else
                        /* no intr pending on this CE */
                        continue;

                ath10k_ce_per_engine_service(ar, ce_id);
        }

        ath10k_pci_sleep(ar);
}
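
/*
 * A minimal sketch of how an interrupt path might fan out to the copy
 * engines, assuming a hypothetical shared-interrupt handler (my_shared_isr
 * is an illustrative name; the real dispatch lives in the PCI layer). With a
 * single shared interrupt the summary-driven helper above is used; with
 * per-CE interrupt lines ath10k_ce_per_engine_service() can be called
 * directly with the known ce_id:
 *
 *      static irqreturn_t my_shared_isr(int irq, void *arg)
 *      {
 *              struct ath10k *ar = arg;
 *
 *              ath10k_ce_per_engine_service_any(ar);
 *              return IRQ_HANDLED;
 *      }
 */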

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
                                                int disable_copy_compl_intr)
{
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;

        ath10k_pci_wake(ar);

        if ((!disable_copy_compl_intr) &&
            (ce_state->send_cb || ce_state->recv_cb))
                ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
        else
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

        ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
}

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id;

        ath10k_pci_wake(ar);
        for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
                struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
                u32 ctrl_addr = ce_state->ctrl_addr;

                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
        }
        ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ce_state *ce_state,
                                void (*send_cb) (struct ce_state *ce_state,
                                                 void *transfer_context,
                                                 u32 buffer,
                                                 unsigned int nbytes,
                                                 unsigned int transfer_id),
                                int disable_interrupts)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->send_cb = send_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
        spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
                                void (*recv_cb) (struct ce_state *ce_state,
                                                 void *transfer_context,
                                                 u32 buffer,
                                                 unsigned int nbytes,
                                                 unsigned int transfer_id,
                                                 unsigned int flags))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->recv_cb = recv_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, 0);
        spin_unlock_bh(&ar_pci->ce_lock);
}
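
/*
 * A minimal sketch of registering a send-completion callback, assuming a
 * hypothetical caller (my_send_done and my_setup_send_path are illustrative
 * names). Passing a non-zero disable_interrupts asks the handler-adjust
 * logic above to keep the copy-complete interrupt masked, which suits
 * callers that poll for send completions instead:
 *
 *      static void my_send_done(struct ce_state *ce_state,
 *                               void *transfer_context, u32 buffer,
 *                               unsigned int nbytes, unsigned int transfer_id)
 *      {
 *              // release the buffer identified by transfer_context
 *      }
 *
 *      static void my_setup_send_path(struct ce_state *ce_state)
 *      {
 *              ath10k_ce_send_cb_register(ce_state, my_send_done, 0);
 *      }
 */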

static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
                                   struct ce_state *ce_state,
                                   const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_ring_state *src_ring;
        unsigned int nentries = attr->src_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->src_ring) {
                WARN_ON(ce_state->src_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->src_ring = (struct ce_ring_state *)ptr;
        src_ring = ce_state->src_ring;

        ptr += sizeof(struct ce_ring_state);
        src_ring->nentries = nentries;
        src_ring->nentries_mask = nentries - 1;

        ath10k_pci_wake(ar);
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;

        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;
        ath10k_pci_sleep(ar);

        src_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        src_ring->base_addr_ce_space_unaligned = base_addr;

        src_ring->base_addr_owner_space = PTR_ALIGN(
                        src_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        src_ring->base_addr_ce_space = ALIGN(
                        src_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        /*
         * Also allocate a shadow src ring in regular
         * mem to use for faster access.
         */
        src_ring->shadow_base_unaligned =
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);

        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_pci_wake(ar);
        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
                                         src_ring->base_addr_ce_space);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
        ath10k_pci_sleep(ar);

        return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                                    unsigned int ce_id,
                                    struct ce_state *ce_state,
                                    const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_ring_state *dest_ring;
        unsigned int nentries = attr->dest_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->dest_ring) {
                WARN_ON(ce_state->dest_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->dest_ring = (struct ce_ring_state *)ptr;
        dest_ring = ce_state->dest_ring;

        ptr += sizeof(struct ce_ring_state);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;

        ath10k_pci_wake(ar);
        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
        dest_ring->write_index &= dest_ring->nentries_mask;
        ath10k_pci_sleep(ar);

        dest_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        dest_ring->base_addr_ce_space_unaligned = base_addr;

        /*
         * Initialize the descriptor memory to 0 so that stale, garbage
         * data cannot crash the system during firmware download.
         */
        memset(dest_ring->base_addr_owner_space_unaligned, 0,
               nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

        dest_ring->base_addr_owner_space = PTR_ALIGN(
                        dest_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        dest_ring->base_addr_ce_space = ALIGN(
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_pci_wake(ar);
        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
                                          dest_ring->base_addr_ce_space);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
        ath10k_pci_sleep(ar);

        return 0;
}

static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
                                             unsigned int ce_id,
                                             const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_state *ce_state = NULL;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        spin_lock_bh(&ar_pci->ce_lock);

        if (!ar_pci->ce_id_to_state[ce_id]) {
                ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
                if (ce_state == NULL) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        return NULL;
                }

                ar_pci->ce_id_to_state[ce_id] = ce_state;
                ce_state->ar = ar;
                ce_state->id = ce_id;
                ce_state->ctrl_addr = ctrl_addr;
                ce_state->state = CE_RUNNING;
                /* Save attribute flags */
                ce_state->attr_flags = attr->flags;
                ce_state->src_sz_max = attr->src_sz_max;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
                                unsigned int ce_id,
                                const struct ce_attr *attr)
{
        struct ce_state *ce_state;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
        if (!ce_state) {
                ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
                return NULL;
        }

        if (attr->src_nentries) {
                if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
                        ath10k_err("Failed to initialize CE src ring for ID: %d\n",
                                   ce_id);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        if (attr->dest_nentries) {
                if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
                        ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
                                   ce_id);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        /* Enable CE error interrupts */
        ath10k_pci_wake(ar);
        ath10k_ce_error_intr_enable(ar, ctrl_addr);
        ath10k_pci_sleep(ar);

        return ce_state;
}
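
/*
 * A minimal sketch of bringing up one copy engine from caller-supplied
 * attributes, assuming hypothetical values (my_ce_attr, my_ce_setup and the
 * entry counts below are illustrative; the real per-CE attribute tables live
 * in the PCI layer). A CE used in both directions gets source and destination
 * entries; a receive-only CE would set src_nentries to 0:
 *
 *      static const struct ce_attr my_ce_attr = {
 *              .flags = 0,
 *              .src_nentries = 16,
 *              .src_sz_max = 2048,
 *              .dest_nentries = 16,
 *      };
 *
 *      static struct ce_state *my_ce_setup(struct ath10k *ar,
 *                                          unsigned int ce_id)
 *      {
 *              return ath10k_ce_init(ar, ce_id, &my_ce_attr);
 *      }
 */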

void ath10k_ce_deinit(struct ce_state *ce_state)
{
        unsigned int ce_id = ce_state->id;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ce_state->state = CE_UNUSED;
        ar_pci->ce_id_to_state[ce_id] = NULL;

        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->src_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->src_ring->base_addr_owner_space,
                                    ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }

        if (ce_state->dest_ring) {
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->dest_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->dest_ring->base_addr_owner_space,
                                    ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }

        kfree(ce_state);
}