xmit.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16
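
/*
 * Worked example of the timing macros above: with the short (half) guard
 * interval a symbol lasts 3.6 us instead of 4 us, so
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us, i.e. 10 * 3.6 us
 * computed in integer arithmetic; the "+ 4" makes the division round up.
 */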

static u32 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
        {    52,  108 },     /*  8: BPSK */
        {   104,  216 },     /*  9: QPSK 1/2 */
        {   156,  324 },     /* 10: QPSK 3/4 */
        {   208,  432 },     /* 11: 16-QAM 1/2 */
        {   312,  648 },     /* 12: 16-QAM 3/4 */
        {   416,  864 },     /* 13: 64-QAM 2/3 */
        {   468,  972 },     /* 14: 64-QAM 3/4 */
        {   520, 1080 },     /* 15: 64-QAM 5/6 */
};
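
/*
 * The table above is what ties an MCS index to a PHY rate: data rate =
 * bits per symbol / symbol time. For example, MCS 7 at 40 MHz carries
 * 540 bits per 4 us symbol = 135 Mb/s with the long GI, or 540 bits per
 * 3.6 us symbol = 150 Mb/s with the short GI.
 */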

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc);

enum {
        MCS_DEFAULT,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[3][16] = {
        [MCS_DEFAULT] = {
                3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180,
                6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320,
        },
        [MCS_HT40] = {
                6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840,
                13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
        },
        [MCS_HT40_SGI] = {
                /* TODO: Only MCS 7 and 15 updated, recalculate the rest */
                6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200,
                13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
        }
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

        spin_lock_bh(&txq->axq_lock);
        tid->paused++;
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

        BUG_ON(tid->paused <= 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0)
                goto unlock;

        if (list_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
        struct ath_buf *bf;
        struct list_head bf_head;

        INIT_LIST_HEAD(&bf_head);

        BUG_ON(tid->paused <= 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0) {
                spin_unlock_bh(&txq->axq_lock);
                return;
        }

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                BUG_ON(bf_isretried(bf));
                list_move_tail(&bf->list, &bf_head);
                ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
        }

        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        tid->tx_buf[cindex] = NULL;

        while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             struct ath_buf *bf)
{
        int index, cindex;

        if (bf_isretried(bf))
                return;

        index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        BUG_ON(tid->tx_buf[cindex] != NULL);
        tid->tx_buf[cindex] = bf;

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
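
/*
 * tid->tx_buf[] is used as a circular buffer over the block-ack window:
 * cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1) assumes that
 * ATH_TID_MAX_BUFS is a power of two. ath_tx_update_baw() clears the
 * completed slot and then slides seq_start/baw_head forward past any
 * leading slots that have already completed, keeping the window anchored
 * at the oldest outstanding frame.
 */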

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                if (bf_isretried(bf))
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;

        bf->bf_state.bf_type |= BUF_RETRY;
        bf->bf_retries++;
        TX_STAT_INC(txq->axq_qnum, a_retries);

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        spin_lock_bh(&sc->tx.txbuflock);
        if (WARN_ON(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }
        tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&tbf->list);
        spin_unlock_bh(&sc->tx.txbuflock);

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;
        tbf->bf_dmacontext = bf->bf_dmacontext;

        return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        rcu_read_lock();

        /* XXX: use ieee80211_find_sta! */
        sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
        if (!sta) {
                rcu_read_unlock();
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset.
                         * But the AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable the reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) &&
                            !bf_last->bf_tx_aborted) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * is not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                ath_tx_rc_status(bf, ts, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update the tx baw and complete the frame
                                 * with failed status if we run out of tx bufs.
                                 */
                                if (!tbf) {
                                        spin_lock_bh(&txq->axq_lock);
                                        ath_tx_update_baw(sc, tid,
                                                          bf->bf_seqno);
                                        spin_unlock_bh(&txq->axq_lock);

                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        ath_tx_rc_status(bf, ts, nbad,
                                                         0, false);
                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0, 0);
                                        break;
                                }

                                ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
                                list_add_tail(&tbf->list, &bf_head);
                        } else {
                                /*
                                 * Clear descriptor status words for
                                 * software retry
                                 */
                                ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        if (tid->state & AGGR_CLEANUP) {
                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;

                        /* send buffered frames as singles */
                        ath_tx_flush_tid(sc, tid);
                }
                rcu_read_unlock();
                return;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx = MCS_HT40_SGI;
                        else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_DEFAULT;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate then
         * avoid aggregation of this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * The h/w can accept aggregates up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536, since we are constrained by the hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}
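
/*
 * Rough sanity check of the 4 ms cap (illustrative): at MCS 7, 40 MHz,
 * long GI the PHY rate is 135 Mb/s, and 135 Mb/s * 4 ms / 8 is about
 * 67500 bytes; the table entry of 66840 is that figure less preamble
 * and PLCP overhead.
 */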

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, half_gi, ndelim, mindelim;

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates.
         */
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */
        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        nsymbits = bits_per_symbol[rix][width];
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
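
/*
 * Worked example (illustrative numbers): for an mpdudensity of 8 us and
 * a first-series rate of MCS 7 at 40 MHz with the long GI,
 * nsymbols = 8 >> 2 = 2 and nsymbits = bits_per_symbol[7][1] = 540, so
 * minlen = (2 * 540) / 8 = 135 bytes. A 64-byte subframe would then need
 * mindelim = (135 - 64) / ATH_AGGR_DELIM_SZ extra delimiters to stretch
 * it out to the required density.
 */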

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

                /* link buffers of this frame to the aggregate */
                ath_tx_addto_baw(sc, tid, bf);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
                                               bf->bf_daddr);
                }
                bf_prev = bf;

        } while (!list_empty(&tid->buf_q));

        bf_first->bf_al = al;
        bf_first->bf_nframes = nframes;

        return status;
#undef PADBYTES
}
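
/*
 * PADBYTES() rounds each subframe up to the 4-byte boundary the A-MPDU
 * format requires: PADBYTES(1538) = (4 - (1538 % 4)) % 4 = 2, while an
 * already-aligned PADBYTES(1536) = 0. The outer "% 4" keeps the aligned
 * case from yielding 4.
 */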

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct list_head bf_q;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf->bf_nframes == 1) {
                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                       u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
        txtid->state |= AGGR_ADDBA_PROGRESS;
        ath_tx_pause_tid(sc, txtid);
        *ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
        struct ath_tx_status ts;
        struct ath_buf *bf;
        struct list_head bf_head;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        ath_tx_pause_tid(sc, txtid);

        /* drop all software retried frames and mark this TID */
        spin_lock_bh(&txq->axq_lock);
        while (!list_empty(&txtid->buf_q)) {
                bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
                if (!bf_isretried(bf)) {
                        /*
                         * NB: this relies on the assumption that a
                         * software-retried frame always stays at the
                         * head of the software queue.
                         */
                        break;
                }
                list_move_tail(&bf->list, &bf_head);
                ath_tx_update_baw(sc, txtid, bf->bf_seqno);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }
        spin_unlock_bh(&txq->axq_lock);

        if (txtid->baw_head != txtid->baw_tail) {
                txtid->state |= AGGR_CLEANUP;
        } else {
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
                ath_tx_flush_tid(sc, txtid);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
        struct ath_atx_tid *txtid;

        if (!(sc->sc_flags & SC_OP_TXAGGR))
                return false;

        txtid = ATH_AN_2_TID(an, tidno);

        if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
                return true;
        return false;
}
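
/*
 * The four entry points above implement the driver side of the ADDBA
 * life cycle: ath_tx_aggr_start() pauses the TID and reports the
 * starting sequence number for the ADDBA request, ath_tx_aggr_resume()
 * sizes the BAW from the peer's ampdu_factor and unpauses the TID once
 * the exchange completes, ath_tx_aggr_stop() tears the session down
 * (deferring to AGGR_CLEANUP while frames are still in flight), and
 * ath_tx_aggr_check() tells mac80211 whether starting a session for a
 * TID is worthwhile.
 */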

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        int qnum;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype;
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise we wait for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load, and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts, this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up, in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (qtype == ATH9K_TX_QUEUE_UAPSD)
                qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
        else
                qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                TXQ_FLAG_TXDESCINT_ENABLE;
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_print(common, ATH_DBG_FATAL,
                          "qnum %u out of range, max %u!\n",
                          qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;
        }
        return &sc->tx.txq[qnum];
}
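
/*
 * A typical caller is ath_tx_setup() below, which maps a HAL access
 * category onto a hardware data queue:
 *
 *      txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
 *      if (txq)
 *              sc->tx.hwq_map[haltype] = txq->axq_qnum;
 *
 * NULL is returned both when the hardware has no free queue and when the
 * queue number falls outside sc->tx.txq[].
 */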

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
        int qnum;

        switch (qtype) {
        case ATH9K_TX_QUEUE_DATA:
                if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                        ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                                  "HAL AC %u out of range, max %zu!\n",
                                  haltype, ARRAY_SIZE(sc->tx.hwq_map));
                        return -1;
                }
                qnum = sc->tx.hwq_map[haltype];
                break;
        case ATH9K_TX_QUEUE_BEACON:
                qnum = sc->beacon.beaconq;
                break;
        case ATH9K_TX_QUEUE_CAB:
                qnum = sc->beacon.cabq->axq_qnum;
                break;
        default:
                qnum = -1;
        }
        return qnum;
}

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ath_txq *txq = NULL;
        u16 skb_queue = skb_get_queue_mapping(skb);
        int qnum;

        qnum = ath_get_hal_qnum(skb_queue, sc);
        txq = &sc->tx.txq[qnum];

        spin_lock_bh(&txq->axq_lock);

        if (txq->axq_depth >= (ATH_TXBUF - 20)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
                          "TX queue: %d is full, depth: %d\n",
                          qnum, txq->axq_depth);
                ath_mac80211_stop_queue(sc, skb_queue);
                txq->stopped = 1;
                spin_unlock_bh(&txq->axq_lock);
                return NULL;
        }

        spin_unlock_bh(&txq->axq_lock);

        return txq;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}
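
/*
 * Example with illustrative numbers: for a beacon interval of 100 TU and
 * cabqReadytime clamped to 10 (percent), tqi_readyTime becomes
 * 100 * 10 / 100 = 10, so the CAB queue stays open for roughly 10% of
 * each beacon interval after the beacon goes out.
 */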

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }

                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                if (bf->bf_stale) {
                        list_del(&bf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        spin_lock_bh(&sc->tx.txbuflock);
                        list_add_tail(&bf->list, &sc->tx.txbuf);
                        spin_unlock_bh(&sc->tx.txbuflock);
                        continue;
                }

                lastbf = bf->bf_lastbf;
                if (!retry_tx)
                        lastbf->bf_tx_aborted = true;

                /* remove ath_buf's of the same mpdu from txq */
                list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                txq->axq_depth--;

                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend) {
                int r;

                ath_print(common, ATH_DBG_FATAL,
                          "Unable to stop TxDMA. Reset HAL!\n");

                spin_lock_bh(&sc->sc_resetlock);
                r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
                                  r);
                spin_unlock_bh(&sc->sc_resetlock);
        }

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;

        if (list_empty(&txq->axq_acq))
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
        struct ath_txq *txq;

        if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "HAL AC %u out of range, max %zu!\n",
                          haltype, ARRAY_SIZE(sc->tx.hwq_map));
                return 0;
        }
        txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
        if (txq != NULL) {
                sc->tx.hwq_map[haltype] = txq->axq_qnum;
                return 1;
        } else
                return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */
        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        list_splice_tail_init(head, &txq->axq_q);
        txq->axq_depth++;

        ath_print(common, ATH_DBG_QUEUE,
                  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (txq->axq_link == NULL) {
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_print(common, ATH_DBG_XMIT,
                          "TXDP[%u] = %llx (%p)\n",
                          txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                *txq->axq_link = bf->bf_daddr;
                ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
                          txq->axq_qnum, txq->axq_link,
                          ito64(bf->bf_daddr), bf->bf_desc);
        }
        ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, &txq->axq_link);
        ath9k_hw_txstart(ah, txq->axq_qnum);
}
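
/*
 * The branch above is the whole DMA hand-off: when axq_link is NULL the
 * queue's DMA engine has nothing chained, so the head buffer's address
 * is written straight to the queue's TXDP register via
 * ath9k_hw_puttxbuf(); otherwise the new chain is appended by patching
 * the previous tail descriptor's link word through *txq->axq_link.
 * Either way, axq_link is then advanced to the new tail's link field
 * and ath9k_hw_txstart() kicks the queue.
 */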

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct list_head *bf_head,
                              struct ath_tx_control *txctl)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type |= BUF_AMPDU;
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
            txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                list_move_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf);

        /* Queue to h/w without aggregation */
        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU;

        /* update starting sequence number for subsequent ADDBA request */
        INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);

        bf->bf_lastbf = bf;
        bf->bf_nframes = 1;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->control.hw_key) {
                if (tx_info->control.hw_key->alg == ALG_WEP)
                        return ATH9K_KEY_TYPE_WEP;
                else if (tx_info->control.hw_key->alg == ALG_TKIP)
                        return ATH9K_KEY_TYPE_TKIP;
                else if (tx_info->control.hw_key->alg == ALG_CCMP)
                        return ATH9K_KEY_TYPE_AES;
        }

        return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
                                  struct ath_buf *bf)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr;
        struct ath_node *an;
        struct ath_atx_tid *tid;
        __le16 fc;
        u8 *qc;

        if (!tx_info->control.sta)
                return;

        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                bf->bf_tidno = qc[0] & 0xf;
        }

        /*
         * For HT capable stations, we save tidno for later use.
         * We also override seqno set by upper layer with the one
         * in tx aggregation state.
         */
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
        hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
        bf->bf_seqno = tid->seq_next;
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
                          struct ath_txq *txq)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;

        flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;

        return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs. 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams, pktlen;

        pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

        /* find number of symbols: PLCP + data */
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix][width];
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        streams = HT_RC_2_STREAMS(rix);
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
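
/*
 * Worked example: a 1500-byte MPDU at MCS 0, 20 MHz, long GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 26, so
 * nsymbols = ceil(12022 / 26) = 463 and duration = 463 * 4 = 1852 us,
 * plus 8 + 8 + 4 + 8 + 4 + 4 = 36 us of training/signal fields for a
 * single stream, about 1888 us in total.
 */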
  1207. static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
  1208. {
  1209. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1210. struct ath9k_11n_rate_series series[4];
  1211. struct sk_buff *skb;
  1212. struct ieee80211_tx_info *tx_info;
  1213. struct ieee80211_tx_rate *rates;
  1214. const struct ieee80211_rate *rate;
  1215. struct ieee80211_hdr *hdr;
  1216. int i, flags = 0;
  1217. u8 rix = 0, ctsrate = 0;
  1218. bool is_pspoll;
  1219. memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
  1220. skb = bf->bf_mpdu;
  1221. tx_info = IEEE80211_SKB_CB(skb);
  1222. rates = tx_info->control.rates;
  1223. hdr = (struct ieee80211_hdr *)skb->data;
  1224. is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
  1225. /*
  1226. * We check if Short Preamble is needed for the CTS rate by
  1227. * checking the BSS's global flag.
  1228. * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
  1229. */
  1230. rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
  1231. ctsrate = rate->hw_value;
  1232. if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
  1233. ctsrate |= rate->hw_value_short;
  1234. for (i = 0; i < 4; i++) {
  1235. bool is_40, is_sgi, is_sp;
  1236. int phy;
  1237. if (!rates[i].count || (rates[i].idx < 0))
  1238. continue;
  1239. rix = rates[i].idx;
  1240. series[i].Tries = rates[i].count;
  1241. series[i].ChSel = common->tx_chainmask;
  1242. if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
  1243. (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
  1244. series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
  1245. flags |= ATH9K_TXDESC_RTSENA;
  1246. } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
  1247. series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
  1248. flags |= ATH9K_TXDESC_CTSENA;
  1249. }
  1250. if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
  1251. series[i].RateFlags |= ATH9K_RATESERIES_2040;
  1252. if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
  1253. series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
  1254. is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
  1255. is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
  1256. is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
  1257. if (rates[i].flags & IEEE80211_TX_RC_MCS) {
  1258. /* MCS rates */
  1259. series[i].Rate = rix | 0x80;
  1260. series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
  1261. is_40, is_sgi, is_sp);
  1262. continue;
  1263. }
  1264. /* legcay rates */
  1265. if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
  1266. !(rate->flags & IEEE80211_RATE_ERP_G))
  1267. phy = WLAN_RC_PHY_CCK;
  1268. else
  1269. phy = WLAN_RC_PHY_OFDM;
  1270. rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
  1271. series[i].Rate = rate->hw_value;
  1272. if (rate->hw_value_short) {
  1273. if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
  1274. series[i].Rate |= rate->hw_value_short;
  1275. } else {
  1276. is_sp = false;
  1277. }
  1278. series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
  1279. phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
  1280. }
  1281. /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
  1282. if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
  1283. flags &= ~ATH9K_TXDESC_RTSENA;
  1284. /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
  1285. if (flags & ATH9K_TXDESC_RTSENA)
  1286. flags &= ~ATH9K_TXDESC_CTSENA;
  1287. /* set dur_update_en for l-sig computation except for PS-Poll frames */
  1288. ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
  1289. bf->bf_lastbf->bf_desc,
  1290. !is_pspoll, ctsrate,
  1291. 0, series, 4, flags);
  1292. if (sc->config.ath_aggr_prot && flags)
  1293. ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
  1294. }
static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;

	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (conf_is_ht(&hw->conf))
		bf->bf_state.bf_type |= BUF_HT;

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	return 0;
}
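
/*
 * Fill in the hardware descriptor for a prepared buffer and hand it
 * to the DMA engine, choosing between the normal, HT-normal and
 * A-MPDU transmit paths.
 */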
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
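
/*
 * Queue a frame on the content-after-beacon (CAB) queue: assign a
 * sequence number if mac80211 asked for one, insert the MAC header
 * padding the hardware expects and hand the frame to ath_tx_start().
 */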
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;
	int padpos, padsize;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		dev_kfree_skb_any(skb);
	}
}

/*****************/
/* TX Completion */
/*****************/
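
/*
 * Hand a completed frame back to mac80211: set the status flags,
 * strip the MAC header padding again and clear any pending
 * power-save wait state.
 */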
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else
		ieee80211_tx_status(hw, skb);
}
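
/*
 * Unmap a completed buffer, report its status and return the
 * descriptor chain to the free list.
 */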
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	ath_debug_stat_tx(sc, txq, bf, ts);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
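
/*
 * Count how many frames of this transmit unit went unacknowledged:
 * walk the buffer chain and, for an aggregate, check each subframe's
 * sequence number against the block-ack bitmap reported by the
 * hardware.
 */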
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
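
/*
 * Translate hardware TX status into the rate-control feedback fields
 * of the ieee80211_tx_info attached to the frame.
 */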
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}
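
/*
 * Restart a mac80211 queue that was stopped for lack of buffers once
 * enough frames have drained from the hardware queue.
 */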
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped &&
	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
		if (qnum != -1) {
			ath_mac80211_start_queue(sc, qnum);
			txq->stopped = 0;
		}
	}
	spin_unlock_bh(&txq->axq_lock);
}
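
/*
 * Reap completed descriptors from a hardware queue: pop finished
 * frames off axq_q, collect their status and pass them on to the
 * aggregate or single-frame completion handlers.
 */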
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			spin_lock_bh(&sc->tx.txbuflock);
			list_move_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ts.ts_longretry;
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
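
/*
 * Watchdog for stuck TX: if a queue still holds the same pending
 * frames across two polling intervals (axq_tx_inprogress was never
 * cleared by ath_tx_processq()), assume the hardware has hung and
 * reset the chip.
 */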
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
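
/*
 * Deferred TX interrupt handling: process every queue whose bit is
 * set in the interrupt's QCU mask.
 */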
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/
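
/*
 * Allocate the DMA descriptor pools for data and beacon frames and
 * set up the TX completion watchdog work.
 */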
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}
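
/*
 * Initialize per-station aggregation state: one entry per WME TID and
 * one per WME access category, each AC mapped to its hardware queue.
 */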
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
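
/*
 * Tear down a station's aggregation state: unschedule its ACs and
 * TIDs on every queue and drain any frames still buffered per TID.
 */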
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock_bh(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock_bh(&txq->axq_lock);
		}
	}
}