htt_rx.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
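
/*
 * Worked example (illustrative numbers only): with max_throughput_mbps = 800
 * the sizing formula above gives
 *
 *   800 * 1000 / (8 * 1000) * 20 ms = 2000 buffers
 *
 * which roundup_pow_of_two() turns into 2048, i.e. the HTT_RX_RING_SIZE_MAX
 * clamp. The fill level computed below uses the same formula with the 10 ms
 * "worst likely" latency: 800 * 1000 / 8000 * 10 = 1000 buffers.
 */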
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}
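
/*
 * Number of buffers currently posted to the ring and not yet consumed by
 * the host: the alloc index (advanced by the fill path above) minus the
 * SW read index, taken modulo the power-of-two ring size.
 */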
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb = htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
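
/*
 * Pop one MPDU's worth of MSDU buffers off the rx ring and link them into
 * a list via skb->next, starting at *head_msdu and ending at *tail_msdu.
 * The per-MSDU firmware rx descriptor bytes are consumed from *fw_desc as
 * the list is built. The return value is non-zero if any MSDU spilled over
 * into chained (ring2) buffers.
 */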
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU is received, the FW loses
			 * some of the MSDU status - in this case the number
			 * of FW descriptors provided is less than the actual
			 * number of MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frame is still delivered to
			 * the upper stack if the MPDU has no CRC error.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest a FW bug? */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
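
/*
 * Allocate and initialize the rx ring: the netbuf pointer array, the
 * DMA-coherent ring of buffer physical addresses shared with the target,
 * and the DMA-coherent alloc index. A retry timer is set up for refills
 * that fail under memory pressure, and the ring is pre-filled to the
 * computed fill level before returning.
 */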
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   (htt->rx_ring.size *
				    sizeof(htt->rx_ring.paddrs_ring)),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		/* bit 7 of the QoS Control field is the A-MSDU Present flag */
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			       struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */
			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4 bytes
		 * but relative to first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}
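
/*
 * Rough layout of the buffer assembled above for an Ethernet2-decapped
 * A-MSDU (a sketch of the standard A-MSDU format, not a structure used by
 * the driver):
 *
 *   [802.11 (QoS) header]                           copied from rx_hdr_status
 *   [DA(6) SA(6) len(2)] [LLC/SNAP(8)] [payload]    subframe 1, padded to 4 B
 *   [DA(6) SA(6) len(2)] [LLC/SNAP(8)] [payload]    subframe 2, padded to 4 B
 *   ...
 *
 * Every subframe except the last is padded to a 4-byte boundary, counted
 * from the start of the first subframe rather than the whole MPDU.
 */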
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		llclen = 8;
		llc = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		int len = ieee80211_hdrlen(hdr->frame_control);
		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;
	return 0;
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}
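
/*
 * Handle an HTT rx indication: walk the MPDU ranges in the message, pop
 * each MPDU's MSDU chain off the rx ring, drop frames with zero length,
 * decrypt errors, management/control status or unsupported chaining,
 * decap the rest as A-MSDU or single MSDU, pass them up via
 * ath10k_process_rx(), and finally replenish the rx ring.
 */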
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
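
/*
 * Top-level dispatcher for target-to-host (T2H) HTT messages: version
 * responses, rx and rx-fragment indications, peer map/unmap events,
 * management and data tx completions, and security indications. The
 * indication buffer is always freed here once the message has been handled.
 */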
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_completed(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_completed(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_STATS_CONF:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}