/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
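
/*
 * Illustrative sketch (not part of the driver): how a caller threads the
 * per-transfer contexts described above through a send and a recv enqueue.
 * The skb/paddr parameters are assumptions for illustration only; the
 * ath10k_ce_* calls are the real functions defined below.
 */
#if 0
static int example_ce_io(struct ath10k_ce_pipe *tx_pipe,
                         struct ath10k_ce_pipe *rx_pipe,
                         struct sk_buff *tx_skb, u32 tx_paddr,
                         struct sk_buff *rx_skb, u32 rx_paddr)
{
        int ret;

        /* Post an anonymous receive buffer; rx_skb is echoed back as the
         * per-transfer context when the recv callback fires. */
        ret = ath10k_ce_recv_buf_enqueue(rx_pipe, rx_skb, rx_paddr);
        if (ret)
                return ret;

        /* Send one buffer; tx_skb comes back in the send-complete callback. */
        return ath10k_ce_send(tx_pipe, tx_skb, tx_paddr, tx_skb->len,
                              0 /* transfer_id */, 0 /* flags */);
}
#endif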

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
                                                       u32 ce_ctrl_addr,
                                                       unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
                                                      u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
                                                      u32 ce_ctrl_addr,
                                                      unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
                                               u32 ce_ctrl_addr,
                                               unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar,
                                           ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
                           CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int n)
{
        u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
                           (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
                           CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
                                                     u32 ce_ctrl_addr)
{
        return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     u32 addr)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
                                                u32 ce_ctrl_addr,
                                                unsigned int n)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_HIGH_MASK) |
                           SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
                                                  u32 ce_ctrl_addr,
                                                  unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
                           (addr & ~SRC_WATERMARK_LOW_MASK) |
                           SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
                                                    u32 ce_ctrl_addr,
                                                    unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_HIGH_MASK) |
                           DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
                                                   u32 ce_ctrl_addr,
                                                   unsigned int n)
{
        u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
                           (addr & ~DST_WATERMARK_LOW_MASK) |
                           DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
                                                        u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
                                                        u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
                                                    u32 ce_ctrl_addr)
{
        u32 host_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + HOST_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
                           host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
                                               u32 ce_ctrl_addr)
{
        u32 misc_ie_addr = ath10k_pci_read32(ar,
                                             ce_ctrl_addr + MISC_IE_ADDRESS);

        ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
                           misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
                                                     u32 ce_ctrl_addr,
                                                     unsigned int mask)
{
        ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
                                 void *per_transfer_context,
                                 u32 buffer,
                                 unsigned int nbytes,
                                 unsigned int transfer_id,
                                 unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_desc *desc, *sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        unsigned int write_index = src_ring->write_index;
        u32 ctrl_addr = ce_state->ctrl_addr;
        u32 desc_flags = 0;
        int ret = 0;

        if (nbytes > ce_state->src_sz_max)
                ath10k_warn("%s: trying to send more than the max (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);

        ret = ath10k_pci_wake(ar);
        if (ret)
                return ret;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) <= 0)) {
                ret = -EIO;
                goto exit;
        }

        desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
                                   write_index);
        sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

        desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

        if (flags & CE_SEND_FLAG_GATHER)
                desc_flags |= CE_DESC_FLAGS_GATHER;
        if (flags & CE_SEND_FLAG_BYTE_SWAP)
                desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

        sdesc->addr = __cpu_to_le32(buffer);
        sdesc->nbytes = __cpu_to_le16(nbytes);
        sdesc->flags = __cpu_to_le16(desc_flags);

        *desc = *sdesc;

        src_ring->per_transfer_context[write_index] = per_transfer_context;

        /* Update Source Ring Write Index */
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

        /* WORKAROUND */
        if (!(flags & CE_SEND_FLAG_GATHER))
                ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

        src_ring->write_index = write_index;
exit:
        ath10k_pci_sleep(ar);
        return ret;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_context,
                   u32 buffer,
                   unsigned int nbytes,
                   unsigned int transfer_id,
                   unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                    buffer, nbytes, transfer_id, flags);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
                                unsigned int nbytes, u32 flags)
{
        unsigned int num_items = sendlist->num_items;
        struct ce_sendlist_item *item;

        item = &sendlist->item[num_items];
        item->data = buffer;
        item->u.nbytes = nbytes;
        item->flags = flags;
        sendlist->num_items++;
}

int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
                            void *per_transfer_context,
                            struct ce_sendlist *sendlist,
                            unsigned int transfer_id)
{
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_sendlist_item *item;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int num_items = sendlist->num_items;
        unsigned int sw_index;
        unsigned int write_index;
        int i, delta, ret = -ENOMEM;

        spin_lock_bh(&ar_pci->ce_lock);

        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

        if (delta >= num_items) {
                /*
                 * Handle all but the last item uniformly.
                 */
                for (i = 0; i < num_items - 1; i++) {
                        item = &sendlist->item[i];
                        ret = ath10k_ce_send_nolock(ce_state,
                                                    CE_SENDLIST_ITEM_CTXT,
                                                    (u32) item->data,
                                                    item->u.nbytes, transfer_id,
                                                    item->flags |
                                                    CE_SEND_FLAG_GATHER);
                        if (ret)
                                ath10k_warn("CE send failed for item: %d\n", i);
                }
                /*
                 * Provide valid context pointer for final item.
                 */
                item = &sendlist->item[i];
                ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
                                            (u32) item->data, item->u.nbytes,
                                            transfer_id, item->flags);
                if (ret)
                        ath10k_warn("CE send failed for last item: %d\n", i);
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
                               void *per_recv_context,
                               u32 buffer)
{
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int write_index;
        unsigned int sw_index;
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;

        ret = ath10k_pci_wake(ar);
        if (ret)
                goto out;

        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

                /* Update destination descriptor */
                desc->addr = __cpu_to_le32(buffer);
                desc->nbytes = 0;

                dest_ring->per_transfer_context[write_index] =
                                                        per_recv_context;

                /* Update Destination Ring Write Index */
                write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
                ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
                dest_ring->write_index = write_index;
                ret = 0;
        } else {
                ret = -EIO;
        }
        ath10k_pci_sleep(ar);

out:
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp,
                                                unsigned int *flagsp)
{
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;
        struct ce_desc *base = dest_ring->base_addr_owner_space;
        struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
        struct ce_desc sdesc;
        u16 nbytes;

        /* Copy in one go for performance reasons */
        sdesc = *desc;

        nbytes = __le16_to_cpu(sdesc.nbytes);
        if (nbytes == 0) {
                /*
                 * This closes a relatively unusual race where the Host
                 * sees the updated DRRI before the update to the
                 * corresponding descriptor has completed. We treat this
                 * as a descriptor that is not yet done.
                 */
                return -EIO;
        }

        desc->nbytes = 0;

        /* Return data from completed destination descriptor */
        *bufferp = __le32_to_cpu(sdesc.addr);
        *nbytesp = nbytes;
        *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

        if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
                *flagsp = CE_RECV_FLAG_SWAPPED;
        else
                *flagsp = 0;

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];

        /* sanity */
        dest_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        dest_ring->sw_index = sw_index;

        return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp,
                                  unsigned int *flagsp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp, flagsp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp)
{
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        dest_ring = ce_state->dest_ring;

        if (!dest_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = dest_ring->nentries_mask;
        sw_index = dest_ring->sw_index;
        write_index = dest_ring->write_index;
        if (write_index != sw_index) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

                /* Return data from completed destination descriptor */
                *bufferp = __le32_to_cpu(desc->addr);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                dest_ring->per_transfer_context[sw_index];

                /* sanity */
                dest_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                dest_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp)
{
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
        struct ce_desc *sdesc, *sbase;
        unsigned int read_index;
        int ret;

        if (src_ring->hw_index == sw_index) {
                /*
                 * The SW completion index has caught up with the cached
                 * version of the HW completion index.
                 * Update the cached HW completion index to see whether
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
                ret = ath10k_pci_wake(ar);
                if (ret)
                        return ret;
                src_ring->hw_index =
                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
                src_ring->hw_index &= nentries_mask;
                ath10k_pci_sleep(ar);
        }
        read_index = src_ring->hw_index;

        if ((read_index == sw_index) || (read_index == 0xffffffff))
                return -EIO;

        sbase = src_ring->shadow_base;
        sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

        /* Return data from completed source descriptor */
        *bufferp = __le32_to_cpu(sdesc->addr);
        *nbytesp = __le16_to_cpu(sdesc->nbytes);
        *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
                           CE_DESC_FLAGS_META_DATA);

        if (per_transfer_contextp)
                *per_transfer_contextp =
                        src_ring->per_transfer_context[sw_index];

        /* sanity */
        src_ring->per_transfer_context[sw_index] = NULL;

        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        src_ring->sw_index = sw_index;

        return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp)
{
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int ret;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;

        src_ring = ce_state->src_ring;

        if (!src_ring)
                return -EIO;

        ar = ce_state->ar;
        ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (write_index != sw_index) {
                struct ce_desc *base = src_ring->base_addr_owner_space;
                struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

                /* Return data from completed source descriptor */
                *bufferp = __le32_to_cpu(desc->addr);
                *nbytesp = __le16_to_cpu(desc->nbytes);
                *transfer_idp = MS(__le16_to_cpu(desc->flags),
                                   CE_DESC_FLAGS_META_DATA);

                if (per_transfer_contextp)
                        *per_transfer_contextp =
                                src_ring->per_transfer_context[sw_index];

                /* sanity */
                src_ring->per_transfer_context[sw_index] = NULL;

                /* Update sw_index */
                sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
                src_ring->sw_index = sw_index;
                ret = 0;
        } else {
                ret = -EIO;
        }

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
                                  unsigned int *transfer_idp)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   bufferp, nbytesp,
                                                   transfer_idp);
        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ce_state->ctrl_addr;
        void *transfer_context;
        u32 buf;
        unsigned int nbytes;
        unsigned int id;
        unsigned int flags;
        int ret;

        ret = ath10k_pci_wake(ar);
        if (ret)
                return;

        spin_lock_bh(&ar_pci->ce_lock);

        /* Clear the copy-complete interrupts that will be handled here. */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
                                          HOST_IS_COPY_COMPLETE_MASK);

        if (ce_state->recv_cb) {
                /*
                 * Pop completed recv buffers and call the registered
                 * recv callback for each
                 */
                while (ath10k_ce_completed_recv_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf, &nbytes,
                                                            &id, &flags) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->recv_cb(ce_state, transfer_context, buf,
                                          nbytes, id, flags);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        if (ce_state->send_cb) {
                /*
                 * Pop completed send buffers and call the registered
                 * send callback for each
                 */
                while (ath10k_ce_completed_send_next_nolock(ce_state,
                                                            &transfer_context,
                                                            &buf,
                                                            &nbytes,
                                                            &id) == 0) {
                        spin_unlock_bh(&ar_pci->ce_lock);
                        ce_state->send_cb(ce_state, transfer_context,
                                          buf, nbytes, id);
                        spin_lock_bh(&ar_pci->ce_lock);
                }
        }

        /*
         * Misc CE interrupts are not being handled, but still need
         * to be cleared.
         */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

        spin_unlock_bh(&ar_pci->ce_lock);
        ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id, ret;
        u32 intr_summary;

        ret = ath10k_pci_wake(ar);
        if (ret)
                return;

        intr_summary = CE_INTERRUPT_SUMMARY(ar);

        for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
                if (intr_summary & (1 << ce_id))
                        intr_summary &= ~(1 << ce_id);
                else
                        /* no intr pending on this CE */
                        continue;

                ath10k_ce_per_engine_service(ar, ce_id);
        }

        ath10k_pci_sleep(ar);
}
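
/*
 * Illustrative sketch (not part of the driver): with a single shared
 * interrupt, a handler would typically just fan out to the service
 * routine above. The IRQ registration and handler name are assumptions
 * for illustration; only ath10k_ce_per_engine_service_any() is real.
 */
#if 0
static irqreturn_t example_shared_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;

        /* Let each CE with a pending interrupt run its callbacks. */
        ath10k_ce_per_engine_service_any(ar);
        return IRQ_HANDLED;
}
#endif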

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
                                                int disable_copy_compl_intr)
{
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        int ret;

        ret = ath10k_pci_wake(ar);
        if (ret)
                return;

        if ((!disable_copy_compl_intr) &&
            (ce_state->send_cb || ce_state->recv_cb))
                ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
        else
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

        ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);
}

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id, ret;

        ret = ath10k_pci_wake(ar);
        if (ret)
                return;

        for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
                struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
                u32 ctrl_addr = ce_state->ctrl_addr;

                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
        }
        ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
                                void (*send_cb)(struct ath10k_ce_pipe *ce_state,
                                                void *transfer_context,
                                                u32 buffer,
                                                unsigned int nbytes,
                                                unsigned int transfer_id),
                                int disable_interrupts)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->send_cb = send_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
        spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
                                void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
                                                void *transfer_context,
                                                u32 buffer,
                                                unsigned int nbytes,
                                                unsigned int transfer_id,
                                                unsigned int flags))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        ce_state->recv_cb = recv_cb;
        ath10k_ce_per_engine_handler_adjust(ce_state, 0);
        spin_unlock_bh(&ar_pci->ce_lock);
}
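
/*
 * Illustrative sketch (not part of the driver): wiring completion
 * callbacks into a pipe via the two registration functions above.
 * The callback bodies and names are assumptions for illustration.
 */
#if 0
static void example_send_done(struct ath10k_ce_pipe *pipe, void *ctx,
                              u32 paddr, unsigned int nbytes,
                              unsigned int transfer_id)
{
        /* ctx is the per-transfer context passed to ath10k_ce_send() */
}

static void example_recv_done(struct ath10k_ce_pipe *pipe, void *ctx,
                              u32 paddr, unsigned int nbytes,
                              unsigned int transfer_id, unsigned int flags)
{
        /* ctx is the per-recv context passed to
         * ath10k_ce_recv_buf_enqueue() */
}

static void example_register_callbacks(struct ath10k_ce_pipe *pipe)
{
        ath10k_ce_send_cb_register(pipe, example_send_done,
                                   0 /* keep copy-complete intr enabled */);
        ath10k_ce_recv_cb_register(pipe, example_recv_done);
}
#endif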

static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
                                   struct ath10k_ce_pipe *ce_state,
                                   const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *src_ring;
        unsigned int nentries = attr->src_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->src_ring) {
                WARN_ON(ce_state->src_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
        src_ring = ce_state->src_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        src_ring->nentries = nentries;
        src_ring->nentries_mask = nentries - 1;

        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;

        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;

        src_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        if (!src_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;
                return -ENOMEM;
        }

        src_ring->base_addr_ce_space_unaligned = base_addr;

        src_ring->base_addr_owner_space = PTR_ALIGN(
                        src_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        src_ring->base_addr_ce_space = ALIGN(
                        src_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        /*
         * Also allocate a shadow src ring in regular
         * mem to use for faster access.
         */
        src_ring->shadow_base_unaligned =
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);
        if (!src_ring->shadow_base_unaligned) {
                pci_free_consistent(ar_pci->pdev,
                                    (nentries * sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    src_ring->base_addr_owner_space,
                                    src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
                ce_state->src_ring = NULL;
                return -ENOMEM;
        }

        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
                                         src_ring->base_addr_ce_space);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

        return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                                    unsigned int ce_id,
                                    struct ath10k_ce_pipe *ce_state,
                                    const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries = attr->dest_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        dma_addr_t base_addr;
        char *ptr;

        nentries = roundup_pow_of_two(nentries);

        if (ce_state->dest_ring) {
                WARN_ON(ce_state->dest_ring->nentries != nentries);
                return 0;
        }

        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;

        ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
        dest_ring = ce_state->dest_ring;

        ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;

        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
        dest_ring->write_index &= dest_ring->nentries_mask;

        dest_ring->per_transfer_context = (void **)ptr;

        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
                pci_alloc_consistent(ar_pci->pdev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
        if (!dest_ring->base_addr_owner_space_unaligned) {
                kfree(ce_state->dest_ring);
                ce_state->dest_ring = NULL;
                return -ENOMEM;
        }

        dest_ring->base_addr_ce_space_unaligned = base_addr;

        /*
         * Zero-initialize the descriptor memory so stale data can't
         * crash the system during firmware download.
         */
        memset(dest_ring->base_addr_owner_space_unaligned, 0,
               nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

        dest_ring->base_addr_owner_space = PTR_ALIGN(
                        dest_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        dest_ring->base_addr_ce_space = ALIGN(
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
                                          dest_ring->base_addr_ce_space);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

        return 0;
}

static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
                                                   unsigned int ce_id,
                                                   const struct ce_attr *attr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);

        spin_lock_bh(&ar_pci->ce_lock);

        ce_state->ar = ar;
        ce_state->id = ce_id;
        ce_state->ctrl_addr = ctrl_addr;
        ce_state->attr_flags = attr->flags;
        ce_state->src_sz_max = attr->src_sz_max;

        spin_unlock_bh(&ar_pci->ce_lock);

        return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
                                      unsigned int ce_id,
                                      const struct ce_attr *attr)
{
        struct ath10k_ce_pipe *ce_state;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
        int ret;

        ret = ath10k_pci_wake(ar);
        if (ret)
                return NULL;

        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
        if (!ce_state) {
                ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
                return NULL;
        }

        if (attr->src_nentries) {
                ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        if (attr->dest_nentries) {
                ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                                   ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }

        /* Enable CE error interrupts */
        ath10k_ce_error_intr_enable(ar, ctrl_addr);

        ath10k_pci_sleep(ar);

        return ce_state;
}
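
/*
 * Illustrative sketch (not part of the driver): initializing a pipe from
 * a caller-supplied attribute set, as the comment above describes. The
 * attribute values and ce_id are made-up examples, not the driver's real
 * CE configuration.
 */
#if 0
static struct ath10k_ce_pipe *example_ce_setup(struct ath10k *ar)
{
        static const struct ce_attr attr = {
                .flags = 0,
                .src_nentries = 16,     /* host->target ring entries */
                .src_sz_max = 2048,     /* max bytes per source buffer */
                .dest_nentries = 0,     /* no destination ring */
        };

        /* Initializes only the source side here; a second ce_attr with
         * dest_nentries set would initialize the destination ring. */
        return ath10k_ce_init(ar, 1 /* ce_id */, &attr);
}
#endif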

void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->src_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->src_ring->base_addr_owner_space,
                                    ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }

        if (ce_state->dest_ring) {
                pci_free_consistent(ar_pci->pdev,
                                    (ce_state->dest_ring->nentries *
                                     sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
                                    ce_state->dest_ring->base_addr_owner_space,
                                    ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }

        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;
}