xmit.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16
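
/*
 * Worked example (added comment, not from the original source): for 25
 * OFDM symbols, SYMBOL_TIME(25) = 25 << 2 = 100 us with the full 0.8 us
 * guard interval, while SYMBOL_TIME_HALFGI(25) = (25 * 18 + 4) / 5 = 90 us
 * with the short GI (3.6 us per symbol, rounded in integer arithmetic).
 */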
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 }, /*  0: BPSK */
	{    52,  108 }, /*  1: QPSK 1/2 */
	{    78,  162 }, /*  2: QPSK 3/4 */
	{   104,  216 }, /*  3: 16-QAM 1/2 */
	{   156,  324 }, /*  4: 16-QAM 3/4 */
	{   208,  432 }, /*  5: 64-QAM 2/3 */
	{   234,  486 }, /*  6: 64-QAM 3/4 */
	{   260,  540 }, /*  7: 64-QAM 5/6 */
};
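
/*
 * Illustrative reading of the table above (added comment): bits_per_symbol
 * is indexed by (MCS index % 8) and channel width. MCS 7 on a 40 MHz
 * channel carries 540 bits per 4 us symbol, i.e. 135 Mbit/s per spatial
 * stream; with the 3.6 us short-GI symbol this becomes 150 Mbit/s.
 */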
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
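
/*
 * Note (added comment): each entry above is roughly PHY_rate * 4 ms / 8,
 * i.e. the largest frame that still fits in a 4 ms transmit duration,
 * clamped to 65532 to stay under the 16-bit aggregate length limit. For
 * example, MCS 0 at HT20 runs at 6.5 Mbit/s, and 6.5e6 * 4e-3 / 8 = 3250
 * bytes, close to the table's 3212 (the difference accounts for PHY
 * overhead).
 */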
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
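
/*
 * Illustrative example (added comment, assuming ATH_BA_INDEX() returns the
 * sequence-space offset seqno - seq_start, as its use above implies): with
 * seq_start = 100 and baw_head = 5, a subframe with seqno 103 maps to
 * index 3 and occupies ring slot cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1)
 * = 8. When the slot at baw_head clears, seq_start and baw_head advance
 * together, sliding the block-ack window forward.
 */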
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 0, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet entirely.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
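
/*
 * Worked example (added comment): if the lowest rate series maps to
 * ath_max_4ms_framelen[MCS_HT20][7] = 32172 bytes and BT coex priority is
 * detected, the aggregate is limited to 32172 * 3 / 8 = 12064 bytes;
 * otherwise to min(32172, ATH_AMPDU_LIMIT_MAX), further capped by the
 * peer's advertised maxampdu.
 */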
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
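
/*
 * Worked example (added comment): for an MPDU density of 4 us, full GI and
 * MCS 7 on a 20 MHz single-stream link, nsymbols = 4 >> 2 = 1 and
 * nsymbits = 260, so minlen = 260 / 8 = 32 bytes. A 24-byte subframe then
 * needs mindelim = (32 - 24) / ATH_AGGR_DELIM_SZ = 2 extra delimiters,
 * assuming the standard 4-byte A-MPDU delimiter (consistent with the
 * ndelim << 2 padding used in ath_tx_form_aggr below).
 */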
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}
static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use the 3.6 us (short GI) or 4 us symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
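
/*
 * Worked example (added comment): a 1500-byte frame at MCS 7, HT20, one
 * stream, full GI: nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 symbols = 188 us of payload, plus
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us of training/signal fields, for a total
 * duration of 224 us.
 */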
  1223. static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
  1224. {
  1225. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1226. struct ath9k_11n_rate_series series[4];
  1227. struct sk_buff *skb;
  1228. struct ieee80211_tx_info *tx_info;
  1229. struct ieee80211_tx_rate *rates;
  1230. const struct ieee80211_rate *rate;
  1231. struct ieee80211_hdr *hdr;
  1232. int i, flags = 0;
  1233. u8 rix = 0, ctsrate = 0;
  1234. bool is_pspoll;
  1235. memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
  1236. skb = bf->bf_mpdu;
  1237. tx_info = IEEE80211_SKB_CB(skb);
  1238. rates = tx_info->control.rates;
  1239. hdr = (struct ieee80211_hdr *)skb->data;
  1240. is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
  1241. /*
  1242. * We check if Short Preamble is needed for the CTS rate by
  1243. * checking the BSS's global flag.
  1244. * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
  1245. */
  1246. rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
  1247. ctsrate = rate->hw_value;
  1248. if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
  1249. ctsrate |= rate->hw_value_short;
  1250. for (i = 0; i < 4; i++) {
  1251. bool is_40, is_sgi, is_sp;
  1252. int phy;
  1253. if (!rates[i].count || (rates[i].idx < 0))
  1254. continue;
  1255. rix = rates[i].idx;
  1256. series[i].Tries = rates[i].count;
  1257. series[i].ChSel = common->tx_chainmask;
  1258. if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
  1259. (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
  1260. series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
  1261. flags |= ATH9K_TXDESC_RTSENA;
  1262. } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
  1263. series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
  1264. flags |= ATH9K_TXDESC_CTSENA;
  1265. }
  1266. if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
  1267. series[i].RateFlags |= ATH9K_RATESERIES_2040;
  1268. if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
  1269. series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
  1270. is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
  1271. is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
  1272. is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
  1273. if (rates[i].flags & IEEE80211_TX_RC_MCS) {
  1274. /* MCS rates */
  1275. series[i].Rate = rix | 0x80;
  1276. series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
  1277. is_40, is_sgi, is_sp);
  1278. if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
  1279. series[i].RateFlags |= ATH9K_RATESERIES_STBC;
  1280. continue;
  1281. }
  1282. /* legcay rates */
  1283. if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
  1284. !(rate->flags & IEEE80211_RATE_ERP_G))
  1285. phy = WLAN_RC_PHY_CCK;
  1286. else
  1287. phy = WLAN_RC_PHY_OFDM;
  1288. rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
  1289. series[i].Rate = rate->hw_value;
  1290. if (rate->hw_value_short) {
  1291. if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
  1292. series[i].Rate |= rate->hw_value_short;
  1293. } else {
  1294. is_sp = false;
  1295. }
  1296. series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
  1297. phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
  1298. }

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}
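
/*
 * Prepare an ath_buf for transmission: account for FCS and MAC header
 * padding in the frame length, pick up hardware crypto key state,
 * assign an aggregation TID/sequence number where applicable and
 * DMA-map the skb payload.
 */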
static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;

	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}

/* FIXME: tx power */
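/*
 * Fill the hardware descriptor for a single frame and hand it to the
 * appropriate send path (aggregate, HT-normal or normal) under the
 * tx queue lock.
 */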
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	if (bf->bf_state.bfs_paprd)
		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}
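
/*
 * Main transmit entry point: grab a free tx buffer, set it up for
 * this skb and start DMA. The mac80211 queue is stopped when the
 * hardware queue runs too deep.
 */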
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q, r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (!txq->stopped && txq->axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}

	q = skb_get_queue_mapping(skb);
	if (q >= 4)
		q = 0;

	spin_lock_bh(&txq->axq_lock);
	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
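
/*
 * Queue a frame on the content-after-beacon (CAB) queue, assigning
 * a sequence number and adding MAC header padding first if needed.
 */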
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/
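
/*
 * Give a completed frame back to mac80211: strip MAC header padding,
 * set the ACK/BA status flags and let the power-save state machine
 * go back to sleep if it was waiting for this tx ack.
 */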
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else {
		q = skb_get_queue_mapping(skb);
		if (q >= 4)
			q = 0;

		if (--sc->tx.pending_frames[q] < 0)
			sc->tx.pending_frames[q] = 0;

		ieee80211_tx_status(hw, skb);
	}
}
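
/*
 * Unmap the DMA buffer, complete the frame (or the PAPRD calibration
 * exchange) and return the buffer chain to the free list.
 */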
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_tx_complete(sc, skb, bf->aphy, tx_flags);
		ath_debug_stat_tx(sc, txq, bf, ts);
	}

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
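
/*
 * Count the subframes of an aggregate that were not acknowledged in
 * the block-ack bitmap; for a plain failed frame this is simply the
 * whole chain.
 */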
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
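
/*
 * Translate the hardware tx status into mac80211 rate-control
 * feedback: retry counts, filtered/underrun flags and A-MPDU
 * ack statistics.
 */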
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
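
/*
 * Restart the corresponding mac80211 queue once the pending frame
 * count on a hardware queue has drained below the threshold.
 */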
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
	if (qnum == -1)
		return;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}
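
/*
 * Reap completed descriptors from a hardware tx queue and run frame
 * completion for them, keeping the last DONE descriptor back as the
 * holding descriptor for the hardware.
 */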
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
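
/*
 * Watchdog for stuck tx queues: if a queue still reports frames in
 * progress on two consecutive polls, assume the hardware hung and
 * reset the chip.
 */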
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
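
/*
 * Tx interrupt bottom half: process every queue the hardware
 * reported completions for.
 */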
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
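
/*
 * EDMA variant of the tx completion tasklet: drain the global tx
 * status ring and complete frames from the per-queue FIFOs.
 */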
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		/*
		 * Make sure the nullfunc frame is ACKed before configuring
		 * the hw into ps mode.
		 */
		if (bf->bf_isnullfunc && txok) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/
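
/*
 * Allocate the DMA-coherent ring used for EDMA tx status reports;
 * each entry is txs_len bytes as reported by the hardware caps.
 */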
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}
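
/*
 * Set up the tx status ring and point the hardware at it.
 */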
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}
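
/*
 * Allocate tx and beacon descriptor DMA memory and, on EDMA-capable
 * chips, the tx status ring; everything is torn down again on error.
 */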
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}
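
/*
 * Initialize per-station aggregation state: one entry per TID, each
 * mapped onto its WME access category and hardware queue.
 */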
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->qnum = sc->tx.hwq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
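
/*
 * Tear down per-station aggregation state: unschedule every TID and
 * access category and drain any frames still queued per TID.
 */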
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int i, tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {
		i = tid->ac->qnum;

		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		txq = &sc->tx.txq[i];
		ac = tid->ac;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
  2025. }