/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16
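
/*
 * Note (illustrative, derived from the macros above): SYMBOL_TIME() and
 * SYMBOL_TIME_HALFGI() convert a symbol count to microseconds at 4 us
 * and 3.6 us (= 18/5 us) per symbol, e.g. SYMBOL_TIME(10) = 40 and
 * SYMBOL_TIME_HALFGI(10) = 184 / 5 = 36. HT_RC_2_MCS()/HT_RC_2_STREAMS()
 * decode an HT rate code: for 0x8b, HT_RC_2_MCS(0x8b) = 0x0b (MCS 11)
 * and HT_RC_2_STREAMS(0x8b) = ((0x08) >> 3) + 1 = 2 streams.
 */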

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
};
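
/*
 * Note: bits_per_symbol[] above gives the data bits carried per OFDM
 * symbol by a single spatial stream for MCS 0-7 at 20/40 MHz; callers
 * scale by the stream count, e.g.
 * bits_per_symbol[rix % 8][width] * HT_RC_2_STREAMS(rix)
 * in ath_pkt_duration() and ath_compute_num_delims() below.
 */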

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
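
/*
 * Note: ath_max_4ms_framelen[][] above holds the largest frame length
 * (in bytes) that still fits within a 4 ms transmit duration for each
 * of MCS 0-31, one row per HT20/HT40 x full/half-GI combination, capped
 * at 65532. ath_lookup_rate() below uses it to bound A-MPDU size.
 */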

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, tid, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}
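
/*
 * Block-ack window bookkeeping (from the two helpers below): tid->tx_buf
 * is a bitmap of in-flight subframes. ATH_BA_INDEX(tid->seq_start, seqno)
 * is a sequence number's offset from the start of the window, and the
 * slot index wraps modulo ATH_TID_MAX_BUFS. When the frame at the head
 * of the window completes, seq_start/baw_head slide forward past every
 * contiguous completed slot.
 */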

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
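
/*
 * Note: ba[] above is the block-ack bitmap reported by hardware
 * (WME_BA_BMP_SIZE bits, hence ">> 5" u32 words and ">> 3" bytes).
 * ATH_BA_INDEX(seq_st, fi->seqno) is a subframe's offset from the BA
 * starting sequence, and ATH_BA_ISSET() tests whether that subframe
 * was acknowledged.
 */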

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;
	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA issue
			 * occurs. The chip needs to be reset, but the AP
			 * code may have synchronization issues when
			 * performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}
	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}
		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}
	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet entirely.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
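
/*
 * Illustrative numbers: with the first rate MCS 7 at HT20/full GI,
 * max_4ms_framelen is 32172 bytes; with BT priority detected the
 * aggregate is scaled to 3/8 of that, i.e.
 * aggr_limit = min((32172 * 3) / 8, ATH_AMPDU_LIMIT_MAX) = 12064
 * (assuming ATH_AMPDU_LIMIT_MAX is larger), before any further clamp
 * to the peer's maxampdu.
 */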

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not higher rates.
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
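
/*
 * Worked example (illustrative): for an mpdudensity of 8 us at MCS 7,
 * single stream, 40 MHz, full GI: nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2,
 * nsymbits = bits_per_symbol[7][1] * 1 = 540, so minlen = 1080 / 8 = 135
 * bytes. A 64-byte subframe would then need
 * mindelim = (135 - 64) / ATH_AGGR_DELIM_SZ extra delimiters
 * (17, if the delimiter size is the usual 4 bytes).
 */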

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
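
/*
 * Note: the loop above keeps forming aggregates until the hardware queue
 * is primed (axq_depth >= ATH_AGGR_MIN_QDEPTH) or the block-ack window
 * closes; a single-frame "aggregate" is instead sent as a plain frame
 * with the aggregation bits cleared.
 */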

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}
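
/*
 * Note: on non-EDMA hardware, txq->axq_link points at the link field of
 * the last descriptor queued; writing the next buffer's DMA address
 * there (*txq->axq_link = bf->bf_daddr) extends the chain without
 * reprogramming TXDP, while an empty queue is started fresh via
 * ath9k_hw_puttxbuf() + ath9k_hw_txstart().
 */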

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
		conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		an = (struct ath_node *) sta->drv_priv;
		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use 3.6 us instead of 4 us symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
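
/*
 * Worked example (illustrative): MCS 7, single stream, 20 MHz, full GI,
 * pktlen = 1500: nbits = 1500 * 8 + 22 = 12022, nsymbits = 260,
 * nsymbols = ceil(12022 / 260) = 47, so duration = SYMBOL_TIME(47) =
 * 188 us plus 8 + 8 + 4 + 8 + 4 + HT_LTF(1) = 36 us of preamble and
 * training fields, i.e. 224 us in total.
 */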

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	return bf;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && txctl->an) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}
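
	/*
	 * Worked example of the padding above (illustrative, not from
	 * the original source): a QoS data header is 26 bytes, so
	 * ath9k_cmn_padpos() returns 26 and padsize = 26 & 3 = 2.
	 * skb_push() grows the frame by two bytes at the front and the
	 * memmove() shifts the header up, leaving a 2-byte gap between
	 * header and payload so the frame body starts 4-byte aligned
	 * for the hardware.
	 */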
	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */
	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
/*****************/
/* TX Completion */
/*****************/
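
/*
 * Queue flow control in this file: ath_tx_start() bumps
 * txq->pending_frames and stops the corresponding mac80211 queue once
 * the count crosses ATH_MAX_QDEPTH; ath_tx_complete() decrements the
 * counter, and ath_wake_mac80211_queue() restarts the queue once the
 * backlog has drained back below the threshold.
 */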
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags, int ftype,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(ftype))
		ath9k_tx_status(hw, skb, ftype);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (!sc->paprd_pending)
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
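
/*
 * Translate the hardware TX status into the per-rate retry counts that
 * mac80211's rate control expects.  For example (illustrative, not from
 * the original source): if the frame finally went out on series 2,
 * ts_rateindex is 2, entries 3 and above are cleared to idx -1 /
 * count 0, and entry 2 is credited with ts_longretry + 1 attempts.
 */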
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * Treat an underrun error as an excessive retry only if the
		 * max frame trigger level has been reached (2 KB for single
		 * stream, and 4 KB for dual stream).  Adjust the long retry
		 * as if the frame was tried hw->max_rate_tries times, to
		 * affect how rate control updates PER for the failed rate.
		 *
		 * When the bus is congested, penalizing these underruns
		 * helps the hardware transmit new frames successfully by
		 * eventually preferring slower rates, which in turn should
		 * also relieve the congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
{
	struct ath_txq *txq;

	txq = sc->tx.txq_map[qnum];
	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}
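
/*
 * Process completed frames on a legacy (non-EDMA) queue.  The last
 * completed descriptor is deliberately left on the queue, marked
 * bf_stale, as a "holding" descriptor; see the race-condition comment
 * in the loop below.  It is only unlinked and returned on a later pass,
 * once a newer descriptor has taken its place at the head of the queue.
 */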
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;
	int qnum;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw reloads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
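
/*
 * Watchdog for stuck TX queues.  Each pass marks every non-empty queue
 * with axq_tx_inprogress; the completion paths clear the flag.  If a
 * queue is still marked on the next pass, nothing has completed on it
 * for a whole poll interval, so the chip is assumed hung and reset.
 */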
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
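
/*
 * EDMA completion path.  On EDMA hardware (the AR9003 family) TX status
 * is read from a dedicated status ring rather than from the frame's own
 * descriptor, which is why ath9k_hw_txprocdesc() is called with a NULL
 * descriptor here and the queue is identified from txs.qid instead.
 */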
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
/*****************/
/* Init, Cleanup */
/*****************/
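
/*
 * Allocate the EDMA TX status ring: 'size' entries of caps.txs_len
 * bytes each, in DMA-coherent memory, which the hardware fills with
 * completion reports and ath_tx_edma_tasklet() drains above.
 */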
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}
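
/*
 * Per-station TX state setup: each of the WME_NUM_TID TIDs starts with
 * an empty block-ack window (baw_head == baw_tail) and no ADDBA session
 * in progress, and TID_TO_WME_AC() folds the TIDs onto the WME_NUM_AC
 * access categories, each of which points at its hardware queue via
 * sc->tx.txq_map[].
 */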
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}