/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#define BITS_PER_BYTE			8
#define OFDM_PLCP_BITS			22
#define HT_RC_2_MCS(_rc)		((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)		((((_rc) & 0x78) >> 3) + 1)
#define L_STF				8
#define L_LTF				8
#define L_SIG				4
#define HT_SIG				8
#define HT_STF				4
#define HT_LTF(_ns)			(4 * (_ns))
#define SYMBOL_TIME(_ns)		((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)		(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec)	((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)
#define OFDM_SIFS_TIME			16
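
/*
 * Illustrative arithmetic for the symbol-time macros above (not part of
 * the original source): with the regular 4 us symbol, SYMBOL_TIME(10) =
 * 10 << 2 = 40 us; with the short (half) guard interval the symbol is
 * 3.6 us, so SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us. The +4
 * makes the integer division by 5 a ceiling, so the result is rounded
 * up to a whole microsecond.
 */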
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};
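
/*
 * Example of how the macros above index this table (illustrative, not
 * from the original source): for HT ratecode 0x8d, HT_RC_2_MCS(0x8d) =
 * 0x0d, i.e. MCS 13, and HT_RC_2_STREAMS(0x8d) = 2 spatial streams, so
 * bits_per_symbol[13][0] = 416 bits per 4 us symbol on a 20 MHz channel
 * (and bits_per_symbol[13][1] = 864 on 40 MHz).
 */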
#define IS_HT_RATE(_rate)	((_rate) & 0x80)

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
	int hdrlen, padsize;

	DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries + 1;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	padsize = hdrlen & 3;
	if (padsize && hdrlen >= 24) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	ieee80211_tx_status(hw, skb);
}
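
/*
 * A note on the padding removal above (illustrative, not from the
 * original source): a QoS data header is 26 bytes, so hdrlen & 3 == 2.
 * On transmit the driver inserted two pad bytes between the header and
 * the payload to 4-byte align the payload; here the header is shifted
 * up over the pad and skb_pull() drops the now-leading pad bytes, so
 * mac80211 sees the frame it originally handed down.
 */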
/* Check if it's okay to send out aggregates */
static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}

static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
				 struct ath_beacon_config *conf)
{
	struct ieee80211_hw *hw = sc->hw;

	/* fill in beacon config data */

	conf->beacon_interval = hw->conf.beacon_int;
	conf->listen_interval = 100;
	conf->dtim_count = 1;
	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}

/* Calculate Atheros packet type from IEEE80211 packet header */
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static bool is_pae(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			return true;
		}
	}

	return false;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}
/* Called only when tx aggregation is enabled and HT is supported */
static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */
	/* For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 *
	 * If fragmentation is on, the sequence number is
	 * not overridden, since it has been
	 * incremented by the fragmentation routine.
	 *
	 * FIXME: check if the fragmentation threshold exceeds
	 * IEEE80211 max.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}
/* To complete a chain of buffers associated with a frame */
static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;
	unsigned long flags;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(to_pci_dev(sc->dev),
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);

	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

/* pause a tid */
static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregates */
void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
/* Compute the number of bad frames */
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
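
/*
 * Illustrative BAW walk-through (not from the original source): with
 * seq_start = 100 and baw_head pointing at its slot, completing seqno
 * 102 only clears the slot at (baw_head + 2); seq_start does not move
 * until the frames at 100 and 101 also complete, at which point the
 * while loop above slides the window start across every leading empty
 * slot in one pass.
 */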
/*
 * ath_pkt_dur - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use a 4 us or a 3.6 us symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
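
/*
 * Worked example for the HT branch above (illustrative figures, not
 * from the original source): a 1500-byte MPDU at MCS 7, 20 MHz, full GI
 * gives nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so nsymbols =
 * ceil(12022 / 260) = 47 and duration = SYMBOL_TIME(47) = 188 us;
 * adding the legacy/HT preamble fields for one stream
 * (8 + 8 + 4 + 8 + 4 + 4 = 36 us) yields 224 us.
 */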
/* Rate module function to set rate related fields in tx descriptor */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ieee80211_hdr *hdr;
	struct ieee80211_hw *hw = sc->hw;
	int i, flags, rtsctsena = 0, enable_g_protection = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	__le16 fc;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	/* get the cix for the lowest valid rix */
	rt = sc->cur_rate_table;
	for (i = 3; i >= 0; i--) {
		if (rates[i].count && (rates[i].idx >= 0)) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].ctrl_rate;

	/* All protection frames are transmitted at 2 Mb/s for 802.11g,
	 * otherwise we transmit them at 1 Mb/s */
	if (hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
	    !conf_is_ht(&hw->conf))
		enable_g_protection = 1;

	/*
	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
	 * just CTS. Note that this is only done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE &&
	    !(bf->bf_flags & ATH9K_TXDESC_NOACK) &&
	    (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
	     WLAN_RC_PHY_HT(rt->info[rix].phy))) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for hw retried frames.
	 * We enable the global flag here and let rate series flags determine
	 * which rates will actually use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/* 802.11g protection not needed, use our default behavior */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/* Set protection if aggregate protection on */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[enable_g_protection].ctrl_rate;
		rtsctsena = 1;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	/*
	 * CTS transmit rate is derived from the transmit rate by looking in the
	 * h/w rate table. We must also factor in whether or not a short
	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
	 */
	ctsrate = rt->info[cix].ratecode |
		(bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ?
			 rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}

/* flush tid's software queue and send frames as non-ampdu's */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q,
				  &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
/* Completion routine of an aggregate */
static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when a BA
				 * issue happens, and the chip needs to be
				 * reset. But AP code may have
				 * synchronization issues when performing
				 * an internal reset in this routine.
				 * Only enable reset in STA mode for now.
				 */
				if (sc->sc_ah->ah_opmode ==
				    NL80211_IFTYPE_STATION)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) { /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */
			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
						      struct ath_buf, list);
				list_cut_position(&bf_head,
						  bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe has only one
				 * descriptor, and that descriptor is also
				 * being used as the holding descriptor,
				 * then the ath_buf is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
					  bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is a holding
			 * descriptor, then in order to requeue the frame
			 * to the software queue, we need to allocate a new
			 * descriptor and copy the content of the holding
			 * descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->tx.txbuflock);
				ASSERT(!list_empty((&sc->tx.txbuf)));
				tbf = list_first_entry(&sc->tx.txbuf,
						       struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->tx.txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
						     bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we do _not_ add to
		 * the end of the queue! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);

	return;
}
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	tx_info_priv->update_rc = false;
	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			       sizeof(tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
			tx_info_priv->update_rc = true;
		}
	}
}
/* Process completed xmit descriptors from the specified queue */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Let's work with the next buffer now */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}
		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
		txq->axq_link);
}

/* Drain only the data queues */
static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->tx.txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
						sc->tx.txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		int r;

		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, true);
		if (r)
			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to reset hardware; reset status %u\n",
				r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
/* Add a sub-frame to block ack window */
static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */
static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}
/*
 * looks up the rate
 * returns aggr limit based on lowest of the rates
 */
static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (!WLAN_RC_PHY_HT(
				    rate_table->info[rates[i].idx].phy)) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * Caller should make sure that the rate is an HT rate.
 */
static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->cur_rate_table;
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
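
/*
 * Worked example for the density math above (illustrative, assuming
 * ATH_AGGR_DELIM_SZ is the 4-byte A-MPDU delimiter): an mpdudensity of
 * 8 us at MCS 15, 40 MHz, full GI gives nsymbols = 8 >> 2 = 2 and
 * nsymbits = 1080, so minlen = (2 * 1080) / 8 = 270 bytes. A 100-byte
 * subframe then needs mindelim = (270 - 100) / 4 = 42 extra delimiters
 * to satisfy the receiver's density restriction.
 */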
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;

	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
				(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first,
						bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q,
				  &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
						    tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;

	return status;
#undef PADBYTES
}
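
/*
 * The PADBYTES() math above, spelled out (illustrative, not from the
 * original source): each aggregated subframe must start on a 4-byte
 * boundary, so a 1534-byte subframe contributes PADBYTES(1534) =
 * (4 - (1534 % 4)) % 4 = 2 pad bytes, plus ndelim << 2 bytes of
 * delimiters, to the running aggregate length accounted in bpad.
 */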
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
			      struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);

		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
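/*
 * NB: the loop above stops either once enough frames are queued to keep
 * the hardware busy (axq_depth >= ATH_AGGR_MIN_QDEPTH) or when the
 * block-ack window closed during formation (ATH_AGGR_BAW_CLOSED);
 * anything left on tid->buf_q is picked up on a later schedule pass.
 */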
/* Called with txq lock held */
static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	__le16 fc;

	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
	if (unlikely(!tx_info_priv))
		return -ENOMEM;
	tx_info->rate_driver_data[0] = tx_info_priv;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(conf_is_ht(&sc->hw->conf) && !is_pae(skb) &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);
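
	/*
	 * NB: each conditional expression above either sets or clears one
	 * BUF_* bit, so bf_type always reflects this frame; e.g. a QoS
	 * data frame on an HT channel with A-MPDU allowed ends up with
	 * BUF_DATA | BUF_HT set and BUF_BAR/BUF_PSPOLL cleared. The
	 * (hdrlen & 3) term in bf_frmlen strips the pad bytes inserted
	 * after the 802.11 header for 4-byte alignment.
	 */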
	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Assign seqno, tidno */

	if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = pci_map_single(to_pci_dev(sc->dev), skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(to_pci_dev(sc->dev),
					   bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"pci_dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;
	return 0;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;
		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
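/*
 * NB: ath_tx_start_dma() picks one of three paths: ath_tx_send_ampdu()
 * when ath_aggr_query() reports aggregation active for the TID,
 * ath_tx_send_normal() when the ADDBA exchange is incomplete, and a
 * direct queue to hardware for non-HT frames or frames without a
 * station context.
 */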
/* Upon failure caller should free skb */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	int r;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(sc, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
			ieee80211_stop_queue(sc->hw,
				skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
/* Initialize TX queue and h/w */
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->tx.txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate tx descriptors: %d\n",
				error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Failed to allocate beacon descriptors: %d\n",
				error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
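/*
 * NB: the do { ... } while (0) above exists only to centralize the
 * error path: any failed allocation breaks out and falls through to the
 * single ath_tx_cleanup() call, which tears down whatever was set up.
 */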
/* Reclaim all tx queue resources */
int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	/* cleanup tx descriptors */
	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	return 0;
}
/* Setup a h/w transmit queue */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL interrupt to reap descriptors. Note that this is
	 * done to reduce interrupt load and this only defers
	 * reaping descriptors, never transmitting frames. Aside
	 * from reducing interrupts this also permits more
	 * concurrency. The only potential downside is if the tx
	 * queue backs up, in which case the top half of the
	 * kernel may back up due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
				TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"qnum %u out of range, max %u!\n",
			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->tx.txqsetup |= 1<<qnum;
	}
	return &sc->tx.txq[qnum];
}
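/*
 * NB: ath9k_hw_setuptxqueue() may hand back a queue number that was
 * already set up (the hardware has a fixed pool of queues); the
 * ATH_TXQ_SETUP() check above makes re-initialization of such a shared
 * queue a no-op.
 */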
/* Reclaim resources for a setup queue */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */
int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"HAL AC %u out of range, max %zu!\n",
			haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"HAL AC %u out of range, max %zu!\n",
				haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}
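/*
 * NB: for data queues this simply replays the haltype -> qnum mapping
 * recorded by ath_tx_setup(); e.g. ath_tx_get_qnum(sc,
 * ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE) returns the h/w queue that was
 * set up for the best-effort access category.
 */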
/* Get a transmit queue, if available */
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"TX queue: %d is full, depth: %d\n",
			qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
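/*
 * NB: the (ATH_TXBUF - 20) threshold above keeps a small reserve of
 * buffers available after the mac80211 queue has been stopped; the
 * exact margin of 20 appears to be an empirical choice rather than a
 * hard requirement.
 */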
/* Update parameters for a transmit queue */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
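/*
 * NB: tqi_readyTime is derived as a percentage of the beacon interval;
 * e.g. a beacon interval of 100 TU with cabqReadytime at 80% yields
 * (100 * 80) / 100 = 80 TU for draining buffered multicast frames
 * after each beacon.
 */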
/* Deferred processing of transmit interrupt */
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
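/*
 * NB: qcumask starts with every queue's bit set and is narrowed by
 * ath9k_hw_gettxintrtxqs() to the queues the hardware flagged in the TX
 * interrupt, so only those are reaped here.
 */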
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
/* Drain the transmit queues and reclaim resources */
void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
		DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
			ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
	}

	ath_drain_txdataq(sc, retry_tx);
}
u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->tx.txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->tx.txq[qnum].axq_aggr_depth;
}
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return true;
		}
	}

	return false;
}
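/*
 * NB: returning true here tells the caller to kick off a new ADDBA
 * exchange; addba_exchangeattempts caps how many times that is retried
 * for a TID with an unresponsive peer before we give up and keep
 * sending unaggregated.
 */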
/* Start TX aggregation */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->state |= AGGR_ADDBA_PROGRESS;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}
/* Stop tx aggregation */
int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);

	return 0;
}
/* Resume tx aggregation */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
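/*
 * NB: the block-ack window above scales with the peer's advertised
 * A-MPDU factor; e.g. IEEE80211_MIN_AMPDU_BUF (8) shifted by an
 * ampdu_factor of 3 yields a 64-frame window, the 802.11n maximum.
 */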
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */
void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frame will always stay
			 * at the head of software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
				  &txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
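/*
 * NB: if frames are still outstanding in the block-ack window
 * (baw_head != baw_tail) the teardown is deferred: AGGR_CLEANUP stays
 * set and the TID is flushed later, once TX completion has drained the
 * remaining sub-frames.
 */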
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)	/* check next tid to keep h/w busy */
			continue;

		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
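/*
 * NB: fairness comes from the two tail re-insertions above: a TID with
 * pending frames goes back to the end of its AC's list and the AC goes
 * back to the end of the txq's list, so each call services at most one
 * TID and everything else rotates behind it.
 */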
/* Initialize per-node transmit state */
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
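/*
 * NB: TID_TO_WME_AC() folds the eight TIDs onto the four WMM access
 * categories (per the usual mapping TIDs 0/3 -> BE, 1/2 -> BK,
 * 4/5 -> VI, 6/7 -> VO), so several tid structures share one
 * ath_atx_ac and therefore one hardware queue.
 */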
/* Cleanup the pending buffers for the node. */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}
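
	/*
	 * NB: 802.11 header lengths are even, so when (hdrlen & 3) is
	 * non-zero it is 2 and padsize = hdrlen % 4 = 2; e.g. a 26-byte
	 * QoS header is pushed up two bytes so the payload that follows
	 * starts on a 4-byte boundary.
	 */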
	txctl.txq = sc->beacon.cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}