xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec)	(_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec)	(((_usec*5)-4)/18)
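/*
 * The half-GI macros use 3.6 us symbols with integer math only, e.g.
 * SYMBOL_TIME_HALFGI(24) = (24 * 18 + 4) / 5 = 87 us (24 * 3.6 = 86.4,
 * rounded up), and NUM_SYMBOLS_PER_USEC_HALFGI(8) = (8 * 5 - 4) / 18 = 2
 * symbols (8 / 3.6 = 2.2, rounded down).
 */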
#define OFDM_SIFS_TIME		16
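
/*
 * Data bits per OFDM symbol for the eight per-stream MCS levels; callers
 * index with (rate index % 8) and multiply by the stream count for MCS 8
 * and above. For example, MCS 0 at 20 MHz carries 26 bits per 4 us
 * symbol, i.e. 6.5 Mbps.
 */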
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
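
/*
 * Rough maximum number of MPDU bytes that fit into a 4 ms transmit
 * window, indexed by channel mode (HT20/HT40, full/short GI) and by
 * MCS 0-31 (eight entries per stream count). Entries are capped at
 * 65532 since the hardware is limited to 16-bit aggregate lengths.
 */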
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}
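
/*
 * Mark the frame with the given sequence number as completed in the
 * block-ack window bitmap, then slide the window start forward past
 * any leading slots that have completed.
 */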
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
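
/*
 * Record a newly queued subframe in the block-ack window bitmap,
 * pushing the window tail forward if the frame lands beyond it.
 */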
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}
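
/*
 * Walk an aggregate's chain of subframes, counting the total (nframes)
 * and the ones not covered by the block-ack bitmap (nbad); on a failed
 * transmit every subframe counts as bad.
 */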
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
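
/*
 * Completion handler for an aggregate: consult the block-ack bitmap to
 * decide which subframes were received, complete those and remove them
 * from the block-ack window, and software-retry the rest by re-queueing
 * them on the TID. Once ATH_MAX_SW_RETRIES is exhausted a subframe is
 * failed and a BAR is requested (sendbar) to move the receiver's window
 * forward.
 */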
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}
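
/*
 * Compute the byte limit for a new aggregate: the smallest 4 ms frame
 * length across the configured rate series, reduced further when BT
 * priority traffic is detected and clamped to the peer's advertised
 * A-MPDU limit. Returns 0 (no aggregation) for legacy rates and
 * rate-control probes.
 */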
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not higher rates.
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
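
	/*
	 * Worked example (hypothetical numbers): with an mpdudensity of
	 * 8 us and a first rate of MCS 15 (two streams) at 20 MHz, full
	 * GI: nsymbols = 8 >> 2 = 2 and nsymbits = 260 * 2 = 520, so
	 * minlen = (2 * 520) / 8 = 130 bytes. A 50-byte subframe would
	 * then need (130 - 50) / ATH_AGGR_DELIM_SZ extra delimiters.
	 */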
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
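
/*
 * Pull frames from the TID's software queue into a single aggregate:
 * stop at the edge of the block-ack window, at the rate-derived byte
 * limit, or at the subframe cap, inserting per-subframe delimiters to
 * satisfy the receiver's MPDU density along the way.
 */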
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
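
/*
 * Round-robin scheduler: service the access category at the head of
 * the queue, forming aggregates for each of its TIDs until the hardware
 * queue is deep enough, then put the AC back at the tail if it still
 * has pending TIDs.
 */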
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid, *last;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		if (tid == last || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		an = (struct ath_node *) sta->drv_priv;
		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use 3.6 us (instead of 4 us) symbol time
 */
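/*
 * Example: a 1500-byte MPDU at MCS 7, 20 MHz, full GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so nsymbols = 47
 * and duration = 47 * 4 us + 36 us of training/signal fields = 224 us.
 */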
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
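
/*
 * Fill in the hardware's four-entry rate series from mac80211's rate
 * table: set RTS/CTS protection flags, compute the on-air duration of
 * each try (ath_pkt_duration for HT, ath9k_hw_computetxtime for legacy)
 * and pick a per-rate chainmask.
 */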
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * Whether the CTS rate needs Short Preamble is decided by the
	 * BSS-wide flag; the individual rate series instead use
	 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
						 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/*
		 * legacy rates - look up the bitrate entry first, so the
		 * ERP check below tests this series' flags rather than a
		 * stale rate pointer.
		 */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

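/*
 * Allocate an ath_buf for the frame, DMA-map the skb and program the
 * initial tx descriptor (frame type, key, flags and a single-segment
 * buffer).  Returns NULL if no buffer is available or the mapping fails.
 */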
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	return bf;
}

/* FIXME: tx power */
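/*
 * Queue the frame under txq->axq_lock: QoS data frames with a valid
 * node go through the per-TID A-MPDU path when mac80211 requested
 * aggregation; everything else is sent as a normal (non-aggregate)
 * frame, with the PAPRD training state applied to the descriptor if set.
 */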
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

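/*
 * Main transmit entry point: assigns a sequence number when asked to,
 * pads the 802.11 header out to a 4-byte boundary for the hardware,
 * sets up the DMA-mapped buffer and may stop the mac80211 queue if this
 * hardware queue gets too deep.
 */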
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */
	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

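/*
 * Hand a completed frame back to mac80211: set the ACK/filtered status
 * flags, strip the MAC header padding again, handle the power-save
 * wait-for-TX-ACK state and the per-queue pending-frame accounting.
 */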
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags, int ftype,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(ftype))
		ath9k_tx_status(hw, skb, ftype);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}

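/*
 * Unmap and complete a single buffer.  PAPRD training frames are either
 * freed or signalled via paprd_complete; all other frames go through
 * ath_tx_complete().  The ath_buf list is returned to the free pool.
 */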
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (!sc->paprd_pending)
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the ath_buf list for this MPDU to the free queue.
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

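/*
 * Translate the hardware tx status into mac80211 rate-control feedback:
 * ACK RSSI, A-MPDU length/ack counts, the retry count for the final
 * rate, and invalidation of the unused rate slots above it.
 */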
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * Treat an underrun error as an excessive retry only if the
		 * max frame trigger level has been reached (2 KB for single
		 * stream, 4 KB for dual stream).  Adjust the long retry as
		 * if the frame was tried hw->max_rate_tries times, so that
		 * rate control raises the PER for the failed rate.  Under
		 * bus congestion, penalizing these underruns steers rate
		 * control toward slower rates, which helps the hardware get
		 * new frames out successfully and relieves the congestion.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

/* Takes no locks itself; the caller must hold spin_lock_bh(&txq->axq_lock)
 * before calling this.
 */
static void __ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	if (txq->mac80211_qnum >= 0 &&
	    txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, txq->mac80211_qnum))
			txq->stopped = 0;
	}
}

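/*
 * Reap completed descriptors from a legacy (non-EDMA) tx queue.  The
 * last DONE descriptor is kept on the queue as the holding descriptor
 * (marked stale) and its ath_buf is recycled on a later pass.
 */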
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;
	int qnum;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled after
		 * software writes TxE and before the hardware re-loads the
		 * last descriptor to pick up the newly chained one.  Software
		 * must keep the last DONE descriptor as a holding descriptor;
		 * it does so by marking it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		spin_lock_bh(&txq->axq_lock);
		__ath_wake_mac80211_queue(sc, txq);

		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

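/*
 * Periodic tx watchdog: if a queue still shows tx in progress on two
 * consecutive polls, the chip is assumed to be hung and is reset.
 * Queues that have pending frames but an empty hardware queue are
 * kicked back into motion instead.
 */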
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			} else {
				/* If the queue has pending buffers, then it
				 * should be doing tx work (and have axq_depth).
				 * We should not get into this state, but in
				 * practice we do.
				 */
				if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
				    (txq->pending_frames > 0 ||
				     !list_empty(&txq->axq_acq) ||
				     txq->stopped)) {
					ath_err(ath9k_hw_common(sc->sc_ah),
						"txq: %p axq_qnum: %u,"
						" mac80211_qnum: %i"
						" axq_link: %p"
						" pending frames: %i"
						" axq_acq empty: %i"
						" stopped: %i"
						" axq_depth: 0 Attempting to"
						" restart tx logic.\n",
						txq, txq->axq_qnum,
						txq->mac80211_qnum,
						txq->axq_link,
						txq->pending_frames,
						list_empty(&txq->axq_acq),
						txq->stopped);
					__ath_wake_mac80211_queue(sc, txq);
					ath_txq_schedule(sc, txq);
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

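/*
 * Tx interrupt tasklet for legacy DMA: ask the hardware which QCUs
 * raised tx interrupts and process only those queues.
 */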
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

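/*
 * Tx completion tasklet for EDMA hardware: drain the global tx status
 * ring, complete the matching FIFO entries per queue, and refill each
 * FIFO from txq_fifo_pending as space frees up.
 */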
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);
		__ath_wake_mac80211_queue(sc, txq);

		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

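/*
 * Allocate the coherent DMA ring that EDMA hardware writes tx status
 * entries into, sized as 'size' entries of the chip's txs_len bytes.
 */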
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

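/*
 * Allocate the tx and beacon descriptor/buffer pools, initialize the tx
 * watchdog work, and set up the EDMA status ring on hardware that needs
 * it.  On any failure, everything allocated so far is torn down.
 */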
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

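/*
 * Initialize per-station aggregation state: reset every TID's block-ack
 * window and flags, link each TID to its WME access category, and point
 * each AC at its hardware transmit queue.
 */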
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

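/*
 * Undo ath_tx_node_init() when a station goes away: unschedule each TID
 * and its AC, drain any buffered frames and clear the aggregation state,
 * all under the owning queue's axq_lock.
 */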
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}