xmit.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
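/*
 * Worked example for the two half-GI macros above: a half-GI symbol
 * lasts 3.6 us, done here in integer math, so
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us, and
 * NUM_SYMBOLS_PER_USEC_HALFGI(8) = (8 * 5 - 4) / 18 = 2 symbols.
 */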
#define OFDM_SIFS_TIME          16

static u32 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {   26,   54 },     /*  0: BPSK */
        {   52,  108 },     /*  1: QPSK 1/2 */
        {   78,  162 },     /*  2: QPSK 3/4 */
        {  104,  216 },     /*  3: 16-QAM 1/2 */
        {  156,  324 },     /*  4: 16-QAM 3/4 */
        {  208,  432 },     /*  5: 64-QAM 2/3 */
        {  234,  486 },     /*  6: 64-QAM 3/4 */
        {  260,  540 },     /*  7: 64-QAM 5/6 */
        {   52,  108 },     /*  8: BPSK */
        {  104,  216 },     /*  9: QPSK 1/2 */
        {  156,  324 },     /* 10: QPSK 3/4 */
        {  208,  432 },     /* 11: 16-QAM 1/2 */
        {  312,  648 },     /* 12: 16-QAM 3/4 */
        {  416,  864 },     /* 13: 64-QAM 2/3 */
        {  468,  972 },     /* 14: 64-QAM 3/4 */
        {  520, 1080 },     /* 15: 64-QAM 5/6 */
};
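/*
 * Sanity check on the table above: entry 7 (MCS 7) carries 540 bits
 * per 40 MHz symbol, and 540 bits / 3.6 us (half GI) = 150 Mbit/s,
 * the nominal single-stream HT40/SGI peak rate.
 */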
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc);

enum {
        MCS_DEFAULT,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[3][16] = {
        [MCS_DEFAULT] = {
                3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180,
                6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320,
        },
        [MCS_HT40] = {
                6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840,
                13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
        },
        [MCS_HT40_SGI] = {
                /* TODO: Only MCS 7 and 15 updated, recalculate the rest */
                6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200,
                13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
        }
};

/*********************/
/* Aggregation logic */
/*********************/
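/*
 * Scheduling below is two-level: a TID is queued on its access
 * category's (ac) tid_q, and the ac in turn on the hardware queue's
 * axq_acq list. The sched flags prevent double-queuing; paused TIDs
 * stay off the lists until ath_tx_resume_tid() requeues them.
 */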
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

        spin_lock_bh(&txq->axq_lock);
        tid->paused++;
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

        BUG_ON(tid->paused <= 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0)
                goto unlock;

        if (list_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
        struct ath_buf *bf;
        struct list_head bf_head;

        INIT_LIST_HEAD(&bf_head);

        BUG_ON(tid->paused <= 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0) {
                spin_unlock_bh(&txq->axq_lock);
                return;
        }

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                BUG_ON(bf_isretried(bf));
                list_move_tail(&bf->list, &bf_head);
                ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
        }

        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        tid->tx_buf[cindex] = NULL;

        while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             struct ath_buf *bf)
{
        int index, cindex;

        if (bf_isretried(bf))
                return;

        index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        BUG_ON(tid->tx_buf[cindex] != NULL);
        tid->tx_buf[cindex] = bf;

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
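/*
 * Illustrative example of the block-ack window bookkeeping above:
 * with seq_start = 100, a frame with bf_seqno = 103 gets index 3 and
 * lands in slot (baw_head + 3) & (ATH_TID_MAX_BUFS - 1) of the
 * circular tx_buf array. ath_tx_update_baw() clears a slot on
 * completion and slides seq_start/baw_head forward past every
 * already-completed slot at the head of the window.
 */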
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                if (bf_isretried(bf))
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;

        bf->bf_state.bf_type |= BUF_RETRY;
        bf->bf_retries++;
        TX_STAT_INC(txq->axq_qnum, a_retries);

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        spin_lock_bh(&sc->tx.txbuflock);
        if (WARN_ON(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }
        tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&tbf->list);
        spin_unlock_bh(&sc->tx.txbuflock);

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        *(tbf->bf_desc) = *(bf->bf_desc);
        tbf->bf_state = bf->bf_state;
        tbf->bf_dmacontext = bf->bf_dmacontext;

        return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        rcu_read_lock();

        /* XXX: use ieee80211_find_sta! */
        sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
        if (!sta) {
                rcu_read_unlock();
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * The AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset,
                         * but the AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable the reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) &&
                            ts->ts_flags != ATH9K_TX_SW_ABORTED) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * is not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                ath_tx_rc_status(bf, ts, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update tx baw and complete the frame with
                                 * failed status if we run out of tx buf
                                 */
                                if (!tbf) {
                                        spin_lock_bh(&txq->axq_lock);
                                        ath_tx_update_baw(sc, tid,
                                                          bf->bf_seqno);
                                        spin_unlock_bh(&txq->axq_lock);

                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        ath_tx_rc_status(bf, ts, nbad,
                                                         0, false);
                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0, 0);
                                        break;
                                }

                                ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
                                list_add_tail(&tbf->list, &bf_head);
                        } else {
                                /*
                                 * Clear descriptor status words for
                                 * software retry
                                 */
                                ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        if (tid->state & AGGR_CLEANUP) {
                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;

                        /* send buffered frames as singles */
                        ath_tx_flush_tid(sc, tid);
                }
                rcu_read_unlock();
                return;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx = MCS_HT40_SGI;
                        else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_DEFAULT;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }
        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;
        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        /*
         * The hardware can accept aggregates of up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here as
         * zero. Ignore 65536, since we are constrained by the hardware.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}
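/*
 * Illustrative numbers for the limit above: if the slowest rate in the
 * series allows max_4ms_framelen = 32768 bytes and BT coex is active,
 * the aggregate is capped at (32768 * 3) / 8 = 12288 bytes; the
 * receiver's advertised maxampdu can only lower that further.
 */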
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, half_gi, ndelim, mindelim;

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
                ndelim += ATH_AGGR_ENCRYPTDELIM;
        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */
        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        nsymbits = bits_per_symbol[rix][width];
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
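/*
 * Worked example with illustrative values: an mpdudensity of 8 us at
 * MCS 7/40 MHz (540 bits per 4 us symbol) gives nsymbols = 2 and
 * minlen = (2 * 540) / 8 = 135 bytes, so a 100-byte subframe needs
 * (135 - 100) / ATH_AGGR_DELIM_SZ additional delimiters.
 */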
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                bf->bf_desc->ds_link = 0;

                /* link buffers of this frame to the aggregate */
                ath_tx_addto_baw(sc, tid, bf);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        bf_prev->bf_desc->ds_link = bf->bf_daddr;
                }
                bf_prev = bf;
        } while (!list_empty(&tid->buf_q));

        bf_first->bf_al = al;
        bf_first->bf_nframes = nframes;

        return status;
#undef PADBYTES
}
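/*
 * Note on the padding math above: PADBYTES() rounds each subframe up
 * to a 4-byte boundary (e.g. al_delta = 1537 contributes 3 pad bytes)
 * and ndelim << 2 converts delimiters to bytes, 4 bytes apiece.
 */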
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct list_head bf_q;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf->bf_nframes == 1) {
                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                       u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
        txtid->state |= AGGR_ADDBA_PROGRESS;
        ath_tx_pause_tid(sc, txtid);
        *ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
        struct ath_tx_status ts;
        struct ath_buf *bf;
        struct list_head bf_head;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        ath_tx_pause_tid(sc, txtid);

        /* drop all software retried frames and mark this TID */
        spin_lock_bh(&txq->axq_lock);
        while (!list_empty(&txtid->buf_q)) {
                bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
                if (!bf_isretried(bf)) {
                        /*
                         * NB: this relies on the assumption that a
                         * software-retried frame always stays at the
                         * head of the software queue.
                         */
                        break;
                }
                list_move_tail(&bf->list, &bf_head);
                ath_tx_update_baw(sc, txtid, bf->bf_seqno);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }
        spin_unlock_bh(&txq->axq_lock);

        if (txtid->baw_head != txtid->baw_tail) {
                txtid->state |= AGGR_CLEANUP;
        } else {
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
                ath_tx_flush_tid(sc, txtid);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
        struct ath_atx_tid *txtid;

        if (!(sc->sc_flags & SC_OP_TXAGGR))
                return false;

        txtid = ATH_AN_2_TID(an, tidno);

        if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
                return true;
        return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        int qnum;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype;
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;
        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise we wait for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up, in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (qtype == ATH9K_TX_QUEUE_UAPSD)
                qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
        else
                qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                TXQ_FLAG_TXDESCINT_ENABLE;
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_print(common, ATH_DBG_FATAL,
                          "qnum %u out of range, max %u!\n",
                          qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;
        }
        return &sc->tx.txq[qnum];
}

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
        int qnum;

        switch (qtype) {
        case ATH9K_TX_QUEUE_DATA:
                if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                        ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                                  "HAL AC %u out of range, max %zu!\n",
                                  haltype, ARRAY_SIZE(sc->tx.hwq_map));
                        return -1;
                }
                qnum = sc->tx.hwq_map[haltype];
                break;
        case ATH9K_TX_QUEUE_BEACON:
                qnum = sc->beacon.beaconq;
                break;
        case ATH9K_TX_QUEUE_CAB:
                qnum = sc->beacon.cabq->axq_qnum;
                break;
        default:
                qnum = -1;
        }

        return qnum;
}

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ath_txq *txq = NULL;
        u16 skb_queue = skb_get_queue_mapping(skb);
        int qnum;

        qnum = ath_get_hal_qnum(skb_queue, sc);
        txq = &sc->tx.txq[qnum];

        spin_lock_bh(&txq->axq_lock);

        if (txq->axq_depth >= (ATH_TXBUF - 20)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
                          "TX queue: %d is full, depth: %d\n",
                          qnum, txq->axq_depth);
                ath_mac80211_stop_queue(sc, skb_queue);
                txq->stopped = 1;
                spin_unlock_bh(&txq->axq_lock);
                return NULL;
        }

        spin_unlock_bh(&txq->axq_lock);

        return txq;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        if (!retry_tx)
                ts.ts_flags = ATH9K_TX_SW_ABORTED;

        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }

                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                if (bf->bf_stale) {
                        list_del(&bf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        spin_lock_bh(&sc->tx.txbuflock);
                        list_add_tail(&bf->list, &sc->tx.txbuf);
                        spin_unlock_bh(&sc->tx.txbuflock);
                        continue;
                }

                lastbf = bf->bf_lastbf;

                /* remove ath_buf's of the same mpdu from txq */
                list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                txq->axq_depth--;

                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend) {
                int r;

                ath_print(common, ATH_DBG_FATAL,
                          "Unable to stop TxDMA. Reset HAL!\n");

                spin_lock_bh(&sc->sc_resetlock);
                r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
                                  r);
                spin_unlock_bh(&sc->sc_resetlock);
        }

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;

        if (list_empty(&txq->axq_acq))
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
        struct ath_txq *txq;

        if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "HAL AC %u out of range, max %zu!\n",
                          haltype, ARRAY_SIZE(sc->tx.hwq_map));
                return 0;
        }
        txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
        if (txq != NULL) {
                sc->tx.hwq_map[haltype] = txq->axq_qnum;
                return 1;
        } else
                return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        list_splice_tail_init(head, &txq->axq_q);
        txq->axq_depth++;

        ath_print(common, ATH_DBG_QUEUE,
                  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (txq->axq_link == NULL) {
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_print(common, ATH_DBG_XMIT,
                          "TXDP[%u] = %llx (%p)\n",
                          txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                *txq->axq_link = bf->bf_daddr;
                ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
                          txq->axq_qnum, txq->axq_link,
                          ito64(bf->bf_daddr), bf->bf_desc);
        }
        txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
        ath9k_hw_txstart(ah, txq->axq_qnum);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct list_head *bf_head,
                              struct ath_tx_control *txctl)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type |= BUF_AMPDU;
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
            txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                list_move_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf);

        /* Queue to h/w without aggregation */
        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU;

        /* update starting sequence number for subsequent ADDBA request */
        INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);

        bf->bf_lastbf = bf;
        bf->bf_nframes = 1;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->control.hw_key) {
                if (tx_info->control.hw_key->alg == ALG_WEP)
                        return ATH9K_KEY_TYPE_WEP;
                else if (tx_info->control.hw_key->alg == ALG_TKIP)
                        return ATH9K_KEY_TYPE_TKIP;
                else if (tx_info->control.hw_key->alg == ALG_CCMP)
                        return ATH9K_KEY_TYPE_AES;
        }

        return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
                                  struct ath_buf *bf)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr;
        struct ath_node *an;
        struct ath_atx_tid *tid;
        __le16 fc;
        u8 *qc;

        if (!tx_info->control.sta)
                return;

        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                bf->bf_tidno = qc[0] & 0xf;
        }

        /*
         * For HT capable stations, we save tidno for later use.
         * We also override seqno set by upper layer with the one
         * in tx aggregation state.
         */
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
        hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
        bf->bf_seqno = tid->seq_next;
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
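/*
 * Example: with tid->seq_next = 5 the assignment above yields
 * seq_ctrl = cpu_to_le16(5 << IEEE80211_SEQ_SEQ_SHIFT) = 0x0050,
 * i.e. sequence number 5, fragment number 0.
 */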
static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
                          struct ath_txq *txq)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;

        flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;

        return flags;
}
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams, pktlen;

        pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

        /* find number of symbols: PLCP + data */
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix][width];
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);
        /* add up duration for legacy/ht training and signal fields */
        streams = HT_RC_2_STREAMS(rix);
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
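/*
 * Worked example: a 1500-byte MPDU at MCS 7, 40 MHz, full GI gives
 * nbits = 12022, nsymbols = 23 (ceiling of 12022 / 540) and
 * SYMBOL_TIME(23) = 92 us; the single-stream training/signal fields
 * add 8 + 8 + 4 + 8 + 4 + 4 = 36 us, for 128 us total.
 */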
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath9k_11n_rate_series series[4];
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i, flags = 0;
        u8 rix = 0, ctsrate = 0;
        bool is_pspoll;

        memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;
        is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        ctsrate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                ctsrate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                series[i].Tries = rates[i].count;
                series[i].ChSel = common->tx_chainmask;

                if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
                    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        series[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        series[i].Rate = rix | 0x80;
                        series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
                                                                 is_40, is_sgi, is_sp);
                        continue;
                }
                /* legacy rates */
  1263. if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
  1264. !(rate->flags & IEEE80211_RATE_ERP_G))
  1265. phy = WLAN_RC_PHY_CCK;
  1266. else
  1267. phy = WLAN_RC_PHY_OFDM;
  1268. rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
  1269. series[i].Rate = rate->hw_value;
  1270. if (rate->hw_value_short) {
  1271. if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
  1272. series[i].Rate |= rate->hw_value_short;
  1273. } else {
  1274. is_sp = false;
  1275. }
  1276. series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
  1277. phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
  1278. }
  1279. /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
  1280. if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
  1281. flags &= ~ATH9K_TXDESC_RTSENA;
  1282. /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
  1283. if (flags & ATH9K_TXDESC_RTSENA)
  1284. flags &= ~ATH9K_TXDESC_CTSENA;
  1285. /* set dur_update_en for l-sig computation except for PS-Poll frames */
  1286. ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
  1287. bf->bf_lastbf->bf_desc,
  1288. !is_pspoll, ctsrate,
  1289. 0, series, 4, flags);
  1290. if (sc->config.ath_aggr_prot && flags)
  1291. ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
  1292. }
static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (conf_is_ht(&hw->conf))
		bf->bf_state.bf_type |= BUF_HT;

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	return 0;
}
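
/*
 * A small stand-alone sketch of the padding arithmetic used above.
 * ath9k_cmn_padpos() returns the 802.11 header length at which
 * mac80211 inserted alignment padding; since header lengths are
 * always even, (padpos & 3) is 0 or 2 and equals the pad size needed
 * to 4-byte-align the payload. The numbers here are an illustrative
 * example, not driver code.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	/* e.g. a QoS data frame: 24-byte header + 2-byte QoS control */
	unsigned padpos = 26;
	unsigned padsize = padpos & 3;	/* -> 2 pad bytes */

	printf("payload starts 4-byte aligned at offset %u\n",
	       padpos + padsize);
	return 0;
}
#endif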
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as a regular frame when the
			 * ADDBA exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}
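
/*
 * A compact sketch of the dispatch decision in ath_tx_start_dma()
 * above, under the assumption that TX aggregation is enabled and a
 * station context exists. Illustrative only; the enum and function
 * names are local inventions, not driver symbols.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdbool.h>
#include <stdio.h>

enum tx_path { TX_NORMAL, TX_HT_NORMAL, TX_AMPDU };

static enum tx_path pick_path(bool is_qos_data, bool ampdu_requested)
{
	if (!is_qos_data)
		return TX_NORMAL;	/* no TID -> plain send */
	if (ampdu_requested)
		return TX_AMPDU;	/* mac80211 asked for A-MPDU */
	return TX_HT_NORMAL;		/* ADDBA neither complete nor pending */
}

int main(void)
{
	printf("%d %d %d\n", pick_path(false, false),
	       pick_path(true, true), pick_path(true, false));
	return 0;
}
#endif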
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
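
/*
 * The stop-queue guard above relies on a simple invariant: only pause
 * the software queue when at least one other frame is still pending
 * on the hardware queue, since that frame's completion is what will
 * wake the queue again. A minimal sketch of the rule, assuming a bare
 * depth counter; not driver code.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdbool.h>

static bool may_stop_queue(int hw_queue_depth)
{
	/*
	 * depth > 1: the frame we failed to set up is not the only
	 * one outstanding, so a TX completion is guaranteed to run
	 * later and resume the queue.
	 */
	return hw_queue_depth > 1;
}
#endif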
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}
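
/*
 * A user-space sketch of the header-padding insertion done above with
 * skb_push()/memmove(): grow the buffer at the front, slide the
 * 802.11 header back to the new start, and leave padsize scratch
 * bytes between header and payload. Buffer contents and sizes here
 * are arbitrary stand-ins; this is not skb code.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 2 bytes of headroom, then a 6-byte header and 4-byte payload */
	char buf[32] = "  HHHHHHPPPP";
	char *data = buf + 2;
	size_t padpos = 6, padsize = 2;

	data -= padsize;			/* like skb_push(skb, padsize) */
	memmove(data, data + padsize, padpos);	/* move header to the front */

	/* header at [0..5], 2-byte pad gap at [6..7], payload at [8..11] */
	printf("%.12s\n", data);
	return 0;
}
#endif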
/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else
		ieee80211_tx_status(hw, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	ath_debug_stat_tx(sc, txq, bf, ts);

	/*
	 * Return the list of ath_bufs of this mpdu to the free queue.
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ts->ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
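
/*
 * A self-contained sketch of the block-ack accounting above, assuming
 * ATH_BA_INDEX() yields a frame's offset from the BA starting sequence
 * number (modulo the 4096-value 802.11 sequence space) and
 * ATH_BA_ISSET() tests that bit in the BA bitmap. A subframe whose
 * bit is clear counts as bad and will be retried. The helper below is
 * a local invention for illustration, not the driver's macros.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 4095	/* 12-bit 802.11 sequence numbers */

static int count_bad(uint64_t ba_bitmap, unsigned seq_st,
		     const unsigned *seqnos, int n)
{
	int i, nbad = 0;

	for (i = 0; i < n; i++) {
		unsigned idx = (seqnos[i] - seq_st) & SEQ_MASK;

		if (idx >= 64 || !(ba_bitmap & (1ULL << idx)))
			nbad++;
	}
	return nbad;
}

int main(void)
{
	unsigned seqs[] = { 100, 101, 102 };

	/* bits 0 and 2 acked, bit 1 missing -> prints 1 bad frame */
	printf("%d\n", count_bad(0x5, 100, seqs, 3));
	return 0;
}
#endif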
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped &&
	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
		if (qnum != -1) {
			ath_mac80211_start_queue(sc, qnum);
			txq->stopped = 0;
		}
	}
	spin_unlock_bh(&txq->axq_lock);
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition in which a BH gets scheduled
		 * after software writes TxE and before the hardware re-reads
		 * the last descriptor to pick up the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed, so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove the ath_bufs of the same transmit unit from the
		 * txq, but leave the last descriptor behind as the holding
		 * descriptor for the hardware.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			spin_lock_bh(&sc->tx.txbuflock);
			list_move_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ts.ts_longretry;
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
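
/*
 * The "stale"/holding-descriptor dance above exists because the DMA
 * engine may still re-read the link field of the last completed
 * descriptor while software chains a new one behind it. A minimal
 * sketch of the reclaim rule, assuming a singly linked descriptor
 * list; the struct and helper are local inventions for illustration.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdbool.h>
#include <stddef.h>

struct sk_desc {
	struct sk_desc *link;	/* next descriptor in the DMA chain */
	bool done;		/* hardware finished with it */
	bool stale;		/* held back as the holding descriptor */
};

/* A done descriptor may be recycled only once it is no longer the
 * list tail: until then the hardware might still follow its link. */
static bool can_recycle(const struct sk_desc *d, const struct sk_desc *tail)
{
	return d->done && d->stale && d != tail;
}
#endif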
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
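
/*
 * The poll work above is a classic two-pass watchdog: one pass arms a
 * flag on every non-empty queue, the completion path (ath_tx_processq)
 * clears it, and if the next pass finds the flag still set the queue
 * made no progress for a whole poll interval and the chip is reset.
 * A stand-alone sketch of one tick, with invented names:
 */
#if 0 /* illustrative sketch, not compiled into the driver */
#include <stdbool.h>

struct sk_queue {
	int depth;		/* frames pending on the queue */
	bool in_progress;	/* armed here, cleared on completion */
};

/* Returns true when the queue has been stuck for one full interval. */
static bool watchdog_tick(struct sk_queue *q)
{
	if (!q->depth)
		return false;
	if (q->in_progress)
		return true;	/* no completion since the last tick */
	q->in_progress = true;	/* a completion will clear this */
	return false;
}
#endif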
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
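
/*
 * TID_TO_WME_AC() above applies the usual 802.1D-priority-to-WMM
 * access-category mapping. A sketch of that table, assuming the
 * standard assignment (TIDs 1-2 background, 0 and 3 best effort,
 * 4-5 video, 6-7 voice); the enum and function are local stand-ins,
 * not driver definitions.
 */
#if 0 /* illustrative sketch, not compiled into the driver */
enum sk_ac { SK_AC_BK, SK_AC_BE, SK_AC_VI, SK_AC_VO };

static enum sk_ac tid_to_ac(int tid)
{
	switch (tid) {
	case 1: case 2:
		return SK_AC_BK;
	case 0: case 3:
		return SK_AC_BE;
	case 4: case 5:
		return SK_AC_VI;
	default:
		return SK_AC_VO;	/* TIDs 6-7 */
	}
}
#endif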
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock_bh(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock_bh(&txq->axq_lock);
		}
	}
}