recv.c

/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of receive path.
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;
	/* XXX For RADAR?
	 * virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah,
			     ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
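/*
 * Note on ath_rx_buf_link(): sc_rxlink always points at the ds_link
 * field of the most recently linked rx descriptor. A NULL sc_rxlink
 * means there is no previous descriptor to chain to (e.g. right after
 * a reset), so the buffer's DMA address is handed to the MAC directly
 * via ath9k_hw_putrxbuf(); otherwise the buffer is appended by writing
 * its DMA address into the previous descriptor's ds_link.
 */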
/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
		      struct ath_node *an,
		      struct sk_buff *skb)
{
	struct ieee80211_bar *bar;
	struct ath_arx_tid *rxtid;
	struct sk_buff *tskb;
	struct ath_recv_status *rx_status;
	int tidno, index, cindex;
	u16 seqno;

	/* look at BAR contents */
	bar = (struct ieee80211_bar *)skb->data;
	tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
		>> IEEE80211_BAR_CTL_TID_S;
	seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

	/* process BAR - indicate all pending RX frames till the BAR seqno */
	rxtid = &an->an_aggr.rx.tid[tidno];

	spin_lock_bh(&rxtid->tidlock);

	/* get relative index */
	index = ATH_BA_INDEX(rxtid->seq_next, seqno);

	/* drop BAR if old sequence (index is too large) */
	if ((index > rxtid->baw_size) &&
	    (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
		/* discard frame, ieee layer may not treat frame as a dup */
		goto unlock_and_free;

	/* complete receive processing for all pending frames up to BAR seqno */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	while ((rxtid->baw_head != rxtid->baw_tail) &&
	       (rxtid->baw_head != cindex)) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		if (tskb != NULL)
			ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* ... and indicate rest of the frames in-order */
	while (rxtid->baw_head != rxtid->baw_tail &&
	       rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

unlock_and_free:
	spin_unlock_bh(&rxtid->tidlock);
	/* free bar itself */
	dev_kfree_skb(skb);
	return IEEE80211_FTYPE_CTL;
}
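/*
 * Note on the rx reorder window used below: rxbuf[] is a circular
 * buffer of ATH_TID_MAX_BUFS slots. baw_head is the slot of the next
 * expected sequence number (rxtid->seq_next) and baw_tail is one past
 * the newest slot in use. ATH_BA_INDEX() (defined elsewhere in the
 * driver) yields the offset of a received sequence number from
 * seq_next within the sequence space, so
 * cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1) is the slot
 * where a frame is parked until it can be delivered in order.
 */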
/* Function to handle a subframe of aggregation when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
			   struct ath_node *an,
			   struct sk_buff *skb,
			   struct ath_recv_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	struct ath_arx_tid *rxtid;
	struct ath_rxbuf *rxbuf;
	u8 type, subtype;
	u16 rxseq;
	int tid = 0, index, cindex, rxdiff;
	__le16 fc;
	u8 *qc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* collect stats of frames with non-zero version */
	if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
		dev_kfree_skb(skb);
		return -1;
	}

	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
	subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

	if (ieee80211_is_back_req(fc))
		return ath_bar_rx(sc, an, skb);

	/* special aggregate processing only for qos unicast data frames */
	if (!ieee80211_is_data(fc) ||
	    !ieee80211_is_data_qos(fc) ||
	    is_multicast_ether_addr(hdr->addr1))
		return ath_rx_subframe(an, skb, rx_status);

	/* lookup rx tid state */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	if (sc->sc_opmode == ATH9K_M_STA) {
		/* Drop the frame not belonging to me. */
		if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
			dev_kfree_skb(skb);
			return -1;
		}
	}

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock(&rxtid->tidlock);

	rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
		(ATH_TID_MAX_BUFS - 1);

	/*
	 * If the ADDBA exchange has not been completed by the source,
	 * process via legacy path (i.e. no reordering buffer is needed)
	 */
	if (!rxtid->addba_exchangecomplete) {
		spin_unlock(&rxtid->tidlock);
		return ath_rx_subframe(an, skb, rx_status);
	}

	/* extract sequence number from recvd frame */
	rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

	if (rxtid->seq_reset) {
		rxtid->seq_reset = 0;
		rxtid->seq_next = rxseq;
	}

	index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

	/* drop frame if old sequence (index is too large) */
	if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
		/* discard frame, ieee layer may not treat frame as a dup */
		spin_unlock(&rxtid->tidlock);
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	/* sequence number is beyond block-ack window */
	if (index >= rxtid->baw_size) {
		/* complete receive processing for all pending frames */
		while (index >= rxtid->baw_size) {
			rxbuf = rxtid->rxbuf + rxtid->baw_head;

			if (rxbuf->rx_wbuf != NULL) {
				ath_rx_subframe(an, rxbuf->rx_wbuf,
						&rxbuf->rx_status);
				rxbuf->rx_wbuf = NULL;
			}

			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);

			index--;
		}
	}

	/* add buffer to the recv ba window */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	rxbuf = rxtid->rxbuf + cindex;

	if (rxbuf->rx_wbuf != NULL) {
		spin_unlock(&rxtid->tidlock);
		/* duplicate frame */
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	rxbuf->rx_wbuf = skb;
	rxbuf->rx_time = get_timestamp();
	rxbuf->rx_status = *rx_status;

	/* advance tail if sequence received is newer
	 * than any received so far */
	if (index >= rxdiff) {
		rxtid->baw_tail = cindex;
		INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
	}

	/* indicate all in-order received frames */
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf)
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
	else
		del_timer_sync(&rxtid->timer);

	spin_unlock(&rxtid->tidlock);

	return IEEE80211_FTYPE_DATA;
}
/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
	struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
	struct ath_node *an = rxtid->an;
	struct ath_rxbuf *rxbuf;
	int nosched;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		/*
		 * Stop if the next one is a very recent frame.
		 *
		 * Call get_timestamp in every iteration to protect against the
		 * case in which a new frame is received while we are executing
		 * this function. Using a timestamp obtained before entering
		 * the loop could lead to a very large time interval
		 * (a negative value typecast to unsigned), breaking the
		 * function's logic.
		 */
		if ((get_timestamp() - rxbuf->rx_time) <
			(ATH_RX_TIMEOUT * HZ / 1000))
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf,
				&rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		nosched = 0;
	else
		nosched = 1; /* no need to re-arm the timer again */

	spin_unlock_bh(&rxtid->tidlock);
}
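/*
 * Note: nosched is only computed above; the flush timer itself is not
 * re-armed in this function. Re-arming happens in ath_ampdu_input(),
 * which calls mod_timer() whenever frames remain pending in the
 * reorder window.
 */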
/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
			     struct ath_arx_tid *rxtid, int drop)
{
	struct ath_rxbuf *rxbuf;
	unsigned long flag;

	spin_lock_irqsave(&rxtid->tidlock, flag);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		if (drop)
			dev_kfree_skb(rxbuf->rx_wbuf);
		else
			ath_rx_subframe(rxtid->an,
					rxbuf->rx_wbuf,
					&rxbuf->rx_status);

		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}
	spin_unlock_irqrestore(&rxtid->tidlock, flag);
}
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
				       u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}
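/*
 * Alignment example: with sc_cachelsz == 32 and skb->data ending in
 * ...0x0c, off = 0x0c % 32 = 12, so skb_reserve(skb, 20) pushes data
 * to the next 32-byte boundary. Allocating len + cachelsz - 1 bytes
 * guarantees the reserve never runs past the end of the buffer.
 */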
static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}
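/*
 * Buffer status interplay: ATH_BUFSTATUS_STALE marks the holding
 * descriptor that ath_rx_tasklet() keeps on the list until the
 * hardware is done with it. If the stack returns the skb while the
 * buffer is still stale, ATH_BUFSTATUS_FREE is set instead of
 * re-linking it immediately; the tasklet re-queues such buffers once
 * it drops the holding reference.
 */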
/*
 * The skb indicated to upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = ath__rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = ath_skb_map_single(sc,
			nskb,
			PCI_DMA_FROMDEVICE,
			/* XXX: Remove get_dma_mem_context() */
			get_dma_mem_context(bf, bf_dmacontext));
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_rxflush = 0;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, set up
		 * to handle the largest one possible.
		 */
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */
		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr =
				ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
					get_dma_mem_context(bf, bf_dmacontext));
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}
/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (sc->sc_opmode == ATH9K_M_MONITOR) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
	    sc->sc_scanning)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
	   & beacon frames */
	if (sc->sc_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;
#undef RX_FILTER_PRESERVE
}
/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}
/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */
	return stopped;
}
/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize access to the rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush).
	 */

	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_rxflush = 1;

	ath_rx_tasklet(sc, 1);

	sc->sc_rxflush = 0;
	spin_unlock_bh(&sc->sc_rxflushlock);
}
/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
		 struct ath_node *an,
		 int is_ampdu,
		 struct sk_buff *skb,
		 struct ath_recv_status *rx_status,
		 enum ATH_RX_TYPE *status)
{
	if (is_ampdu && sc->sc_rxaggr) {
		*status = ATH_RX_CONSUMED;
		return ath_ampdu_input(sc, an, skb, rx_status);
	} else {
		*status = ATH_RX_NON_CONSUMED;
		return -1;
	}
}
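/*
 * ath_rx_tasklet() below walks sc_rxbuf in FIFO order. Its PA2DESC()
 * helper converts a descriptor DMA address (as found in ds_link) back
 * into a CPU-visible pointer by offsetting into the contiguous
 * descriptor block set up by ath_descdma_setup(), so the HAL can peek
 * at the next descriptor's done bit while processing the current one.
 */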
/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if (sc->sc_rxflush && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition that BH gets scheduled after sw
		 * writes RxE and before hw re-loads the last descriptor to get
		 * the newly chained one. Software must keep the last DONE
		 * descriptor as a holding descriptor - software does so by
		 * marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
				tds, tbf->bf_daddr,
				PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {		/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}

		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;

		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memzero(&rx_status, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push the frame up to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}

			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}

		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;

		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}

		/* sc->sc_noise_floor is only available when the station
		   attaches to an AP, so we use a default value
		   if we are not yet attached. */

		/* XXX we should use either sc->sc_noise_floor or
		 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
		 * to calculate the noise floor.
		 * However, the value returned by ath_hal_getChanNoise
		 * seems to be incorrect (-31dBm on the last test),
		 * so we will use a hard-coded value until we
		 * figure out what is going on.
		 */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */
		type = ath_rx_indicate(sc, skb,
			&rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
					ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
		 * So we don't want to do it here in this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (TRUE);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_internal_reset(sc);
	}

	return 0;
#undef PA2DESC
}
/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
		      const u8 *addr,
		      u16 tid,
		      u16 *ssn)
{
	struct ath_arx_tid *rxtid;
	struct ath_node *an;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Node not found to initialize RX aggregation\n",
			__func__);
		return -1;
	}

	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor; /* FIXME */

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock_bh(&rxtid->tidlock);
	if (sc->sc_rxaggr) {
		/* Allow aggregation reception
		 * Adjust rx BA window size. Peer might indicate a
		 * zero buffer size for a _dont_care_ condition.
		 */
		if (buffersize)
			rxtid->baw_size = min(buffersize, rxtid->baw_size);

		/* set rx sequence number */
		rxtid->seq_next = *ssn;

		/* Allocate the receive buffers for this TID */
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);

		if (rxtid->rxbuf == NULL) {
			/*
			 * If the rxbuf is not NULL at this point, we *probably*
			 * already allocated the buffer on a previous ADDBA,
			 * and this is a subsequent ADDBA that got through.
			 * Don't allocate, but use the value in the pointer,
			 * we zero it out when we de-allocate.
			 */
			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf), GFP_ATOMIC);
		}
		if (rxtid->rxbuf == NULL) {
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Unable to allocate RX buffer, "
				"refusing ADDBA\n", __func__);
		} else {
			/* Ensure the memory is zeroed out (all internal
			 * pointers are null) */
			memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf));
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);

			/* Allow aggregation reception */
			rxtid->addba_exchangecomplete = 1;
		}
	}
	spin_unlock_bh(&rxtid->tidlock);

	return 0;
}
/* Process DELBA */

int ath_rx_aggr_stop(struct ath_softc *sc,
		     const u8 *addr,
		     u16 tid)
{
	struct ath_node *an;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: RX aggr stop for non-existent node\n", __func__);
		return -1;
	}

	ath_rx_aggr_teardown(sc, an, tid);
	return 0;
}
/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

	if (!rxtid->addba_exchangecomplete)
		return;

	del_timer_sync(&rxtid->timer);
	ath_rx_flush_tid(sc, rxtid, 0);
	rxtid->addba_exchangecomplete = 0;

	/* De-allocate the receive buffer array allocated when addba started */
	if (rxtid->rxbuf) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Deallocating TID %d rxbuff @%p\n",
			__func__, tid, rxtid->rxbuf);
		kfree(rxtid->rxbuf);

		/* Set pointer to null to avoid reuse */
		rxtid->rxbuf = NULL;
	}
}
/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_rxaggr) {
		struct ath_arx_tid *rxtid;
		int tidno;

		/* Init per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
				tidno < WME_NUM_TID;
				tidno++, rxtid++) {
			rxtid->an = an;
			rxtid->seq_reset = 1;
			rxtid->seq_next = 0;
			rxtid->baw_size = WME_MAX_BA;
			rxtid->baw_head = rxtid->baw_tail = 0;

			/*
			 * Ensure the buffer pointer is null at this point
			 * (needs to be allocated when addba is received)
			 */
			rxtid->rxbuf = NULL;

			setup_timer(&rxtid->timer, ath_rx_timer,
				(unsigned long)rxtid);
			spin_lock_init(&rxtid->tidlock);

			/* ADDBA state */
			rxtid->addba_exchangecomplete = 0;
		}
	}
}
void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_rxaggr) {
		struct ath_arx_tid *rxtid;
		int tidno, i;

		/* Clean up per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
				tidno < WME_NUM_TID;
				tidno++, rxtid++) {

			if (!rxtid->addba_exchangecomplete)
				continue;

			/* must cancel timer first */
			del_timer_sync(&rxtid->timer);

			/* drop any pending sub-frames */
			ath_rx_flush_tid(sc, rxtid, 1);

			for (i = 0; i < ATH_TID_MAX_BUFS; i++)
				ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

			rxtid->addba_exchangecomplete = 0;
		}
	}
}
/* Cleanup per-node receive state */

void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
	ath_rx_node_cleanup(sc, an);
}
dma_addr_t ath_skb_map_single(struct ath_softc *sc,
			      struct sk_buff *skb,
			      int direction,
			      dma_addr_t *pa)
{
	/*
	 * NB: do NOT use skb->len, which is 0 on initialization.
	 * Use skb's entire data area instead.
	 */
	*pa = pci_map_single(sc->pdev, skb->data,
		skb_end_pointer(skb) - skb->head, direction);
	return *pa;
}

void ath_skb_unmap_single(struct ath_softc *sc,
			  struct sk_buff *skb,
			  int direction,
			  dma_addr_t *pa)
{
	/* Unmap skb's entire data area */
	pci_unmap_single(sc->pdev, *pa,
		skb_end_pointer(skb) - skb->head, direction);
}