/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
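
/*
 * Worked example (illustrative, not part of the original source): one HT
 * OFDM symbol lasts 4 us with the full guard interval and 3.6 us with the
 * short GI.  For 7 symbols, SYMBOL_TIME(7) = 28 us, while
 * SYMBOL_TIME_HALFGI(7) = (7 * 18 + 4) / 5 = 26 us; the "+ 4" makes the
 * integer division round 25.2 us up to the next whole microsecond.
 */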
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
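
/*
 * Illustrative note (not part of the original source): the table holds
 * data bits per OFDM symbol for a single spatial stream, so callers index
 * it with (rix % 8) and multiply by HT_RC_2_STREAMS(rix).  E.g. MCS 12
 * (16-QAM 3/4, two streams) at 20 MHz gives
 * bits_per_symbol[12 % 8][0] * 2 = 156 * 2 = 312 bits per symbol.
 */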
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
		6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
		7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
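
/*
 * Illustrative note (not part of the original source): the table is
 * indexed as ath_max_4ms_framelen[mode][MCS] and holds the largest frame
 * that still fits a 4 ms transmit duration at that rate.  E.g. MCS 7 at
 * HT20/full GI is 65 Mbit/s, and 65e6 * 0.004 / 8 = 32500 bytes; the
 * entry of 32172 is slightly lower to leave room for overhead.  Entries
 * are capped at 65532 because the hardware length fields are 16-bit.
 */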
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
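
/*
 * Illustrative note (not part of the original source): tid->tx_buf is a
 * circular bitmap tracking which sequence numbers inside the block-ack
 * window are still pending.  With seq_start = 100 and baw_head = 0,
 * seqno 103 maps to index 3 and its bit is set by addto_baw(); once the
 * bit is cleared again, update_baw() slides seq_start/baw_head forward
 * past every completed slot at the head of the window.
 */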
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}
	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0,
								    !flush);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering.
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;

			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
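
/*
 * Worked example (illustrative, not part of the original source): with
 * MCS 7/HT20/full GI in the rate series, max_4ms_framelen is 32172
 * bytes.  When Bluetooth coex is active (SC_OP_BT_PRIORITY_DETECTED) the
 * limit is scaled to 3/8 of that, i.e. 32172 * 3 / 8 = 12064 bytes, so
 * aggregates stay short and leave more air time for BT traffic.
 */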
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not at higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and a non-enterprise AR9003 card.
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
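
/*
 * Worked example (illustrative, not part of the original source,
 * assuming ATH_AGGR_DELIM_SZ is the 4-byte delimiter size): for an
 * mpdudensity of 8 us at MCS 7/HT20/full GI, nsymbols = 8 / 4 = 2 and
 * nsymbits = 260, so minlen = (2 * 260) / 8 = 65 bytes.  A 50-byte
 * subframe then needs mindelim = (65 - 50) / 4 = 3 extra delimiters to
 * stretch it out to the receiver's minimum MPDU spacing.
 */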
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
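
/*
 * Worked example (illustrative, not part of the original source): a
 * 1500-byte MPDU at MCS 7 (rix = 7, one stream), 20 MHz, full GI:
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 and the data portion lasts
 * 47 * 4 = 188 us.  The training/signal fields add
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us, for a total duration of 224 us.
 */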
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}

	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}

	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
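/*
 * TX path for frames on an aggregation-enabled TID: either defer the
 * frame to the per-TID software queue for later aggregation, or send
 * it to the hardware directly as a single-frame A-MPDU and track it
 * in the block-ack window.
 */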
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
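/*
 * Send a single (non-aggregated) frame to the hardware, reusing a
 * previously prepared ath_buf if one is already attached to the skb.
 */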
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;
	if (!bf)
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);

	if (!bf)
		return;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}
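/*
 * Cache per-frame state (key index, key type, frame length) in the
 * driver-private area of the skb; this overwrites the mac80211
 * control pointers that share that space (see the note in
 * ath_tx_start).
 */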
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
}
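/*
 * Reduce the TX chainmask from three chains (0x7) to two (0x3) for
 * lower rates on 5 GHz channels when the hardware advertises APM
 * support - presumably to keep transmit power within limits; the
 * exact motivation is not spelled out here.
 */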
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
/*
 * Assign a descriptor (and a sequence number, if necessary) and
 * map the buffer for DMA. Frees the skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}
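/*
 * Hand one frame to the DMA path under the txq lock: resolve the
 * target TID for QoS data when aggregation is enabled, then take
 * either the A-MPDU path or the normal single-frame path.
 */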
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			goto out;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}

out:
	spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		hdr = (struct ieee80211_hdr *) skb->data;
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, skb, txctl);

	return 0;
}
/*****************/
/* TX Completion */
/*****************/
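/*
 * Hand a completed frame back to mac80211: set the status flags,
 * strip the MAC header padding again, update the power-save state
 * and wake the queue if it was stopped for flow control.
 */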
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}
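/*
 * Tear down the DMA mapping of a completed buffer, report the frame
 * status (or complete PAPRD calibration frames internally) and return
 * the ath_buf list of this MPDU to the free pool.
 */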
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
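/*
 * Translate the hardware TX status into the rate-control fields of
 * the mac80211 TX info: ack signal, A-MPDU lengths and per-rate
 * retry counts.
 */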
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing these
		 * underruns should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
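/*
 * Common completion handling for one transmit unit: update the queue
 * depth counters, complete the buffer(s) with the txq lock dropped,
 * and kick the aggregation scheduler afterwards.
 */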
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}
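/*
 * Reap completed descriptors from a legacy (non-EDMA) TX queue,
 * honouring the stale holding descriptor that must stay on the queue
 * for the hardware (see the race described below).
 */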
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}
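/*
 * Watchdog for TX completions: if a queue still has frames pending
 * and no descriptor completed between two polls, assume the hardware
 * hung and schedule a chip reset.
 */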
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
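/*
 * TX interrupt bottom half for legacy chips: process every queue
 * whose bit is set in the interrupt's QCU mask.
 */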
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
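/*
 * TX completion path for chips with the EDMA capability: status is
 * read from a separate status ring and mapped back to the owning
 * queue via the qid, with beacon completions filtered out.
 */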
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}
/*****************/
/* Init, Cleanup */
/*****************/
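/*
 * Allocate the DMA-coherent ring that the hardware writes TX status
 * entries into on EDMA chips.
 */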
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}
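/*
 * Allocate the TX and beacon descriptor DMA areas, start the
 * completion watchdog and, on EDMA hardware, set up the TX status
 * ring. On failure everything allocated so far is torn down again.
 */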
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}
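/*
 * Initialize the per-station aggregation state: one TID structure per
 * WME TID with an empty block-ack window, and one access category
 * structure per WME AC mapped to its hardware queue.
 */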
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
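/*
 * Undo ath_tx_node_init when a station goes away: unschedule all of
 * the node's TIDs/ACs and drain any frames still sitting in their
 * software queues.
 */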
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}