/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "debug.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
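
/*
 * Map an outgoing IBSS frame to an HTC endpoint. Multicast frames always
 * use ENDPOINT_2. Unicast destinations are tracked in ar->node_map so that
 * frames to the same peer keep using the same endpoint; a new peer gets a
 * currently idle endpoint, or a round-robin one (via next_ep_id) when all
 * of ENDPOINT_2..ENDPOINT_5 are busy.
 */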
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
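
/*
 * Handle a frame for a U-APSD capable STA that is asleep: frames sent as
 * part of a trigger-delivery get the more-data/EOSP flags set, anything
 * else on a delivery-enabled AC is buffered on the per-STA APSD queue.
 * Returns true if the skb was queued (i.e. consumed) here.
 */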
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * the more-data and EOSP bits. Set EOSP if the queue
		 * is empty or sufficient frames have been delivered
		 * for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
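
/*
 * Handle a frame for a sleeping STA using legacy power save: PS-Poll
 * delivered frames get the more-data flag, everything else is buffered on
 * the per-STA psq, updating the PVB when the queue transitions from empty.
 * Returns true if the skb was queued here.
 */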
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);

	return true;
}
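
/*
 * AP-mode power-save handling for an outgoing frame. Multicast frames are
 * buffered on the shared mcastpsq while any STA is asleep (unless this
 * transmit is the DTIM-expiry flush itself); unicast frames for sleeping
 * STAs go through the U-APSD or legacy psq paths above. Returns true if
 * the skb was consumed (queued or dropped).
 */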
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM
			 * expiry, queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}

	return ps_queued;
}

/* Tx functions */

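/*
 * Send a WMI control frame over the given HTC endpoint. On success the
 * skb is handed to HTC and released through the tx-completion path; if no
 * cookie can be allocated (or the control endpoint is full) the skb is
 * freed here and -ENOMEM is returned.
 */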
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
		return -EACCES;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
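
/*
 * Main data transmit path (the netdev xmit handler). Applies AP-mode
 * power-save buffering, adds the WMI data header and optional checksum
 * offload metadata, maps the frame to an HTC endpoint and queues it to
 * HTC. The skb is always consumed and 0 is always returned.
 */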
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since
		 * we are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}
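
/*
 * HTC callback invoked when an endpoint's send queue overflows. Control
 * endpoint overflow just flags WMI_CTRL_EP_FULL; for data endpoints the
 * overflowing packet is dropped if a lower-priority stream is crowding
 * out the highest-priority active one, otherwise the netif queue is
 * stopped to throttle the host.
 */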
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI operation, if this endpoint is getting
		 * full then something is running rampant; the host should
		 * not be exhausting the WMI queue with too many commands.
		 * The only exception to this is during testing using
		 * endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
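
/*
 * HTC tx-completion callback. Reaps the completed packets, updates the
 * pending counters and per-vif statistics, releases the cookies and skbs,
 * and wakes the netif queues (and any waiter on the control endpoint)
 * as appropriate.
 */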
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;

			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}
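
/*
 * Top up the given HTC endpoint with receive buffers, keeping up to
 * ATH6KL_MAX_RX_BUFFERS outstanding. Buffer data pointers are 4-byte
 * aligned before the buffers are handed to HTC.
 */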
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
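
/*
 * Pre-allocate 'count' AMSDU-sized receive buffers and park them on
 * ar->amsdu_rx_buffer_queue, from which ath6kl_alloc_amsdu_rxbuf() below
 * hands them out.
 */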
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
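
/*
 * Split a received A-MSDU into its 802.3 subframes, convert each subframe
 * to DIX format and queue it on the rxtid delivery queue. The original
 * aggregate skb is freed once sliced.
 */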
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
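
/*
 * Release in-order frames from the reorder hold queue for the given TID,
 * advancing seq_next as frames are delivered to the stack. With order == 1
 * the walk stops at the first hole; with a non-zero seq_no (e.g. on BAR)
 * frames are flushed up to that sequence number.
 */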
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4095, there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition (for 4095, TID_WINDOW_SZ = 2 x win_sz with win_sz
	 * of 2, 4, 8 or 16 works fine).
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
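
/*
 * Place a received frame into the per-TID reorder window (slicing A-MSDUs
 * as needed), shifting the window forward when the sequence number falls
 * outside it, then deliver whatever is now in order. Returns true if the
 * frame was consumed by the aggregation logic.
 */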
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_conn->timer_scheduled = true;
				mod_timer(&agg_conn->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;

	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request
	 * If it is zero, send all frames
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}

	return;
}
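
/*
 * HTC rx-completion callback: the main receive path. Dispatches control
 * frames to WMI, tracks AP-mode STA power-save transitions, strips the
 * WMI data header and rx metadata, and feeds unicast frames through the
 * aggregation reorder logic before delivery to the network stack.
 */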
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else
			aggr_conn = vif->aggr_cntxt->aggr_conn;

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
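
/*
 * Reorder-timer handler: flush any TIDs whose hold queue made no progress
 * since the timer was armed, then re-arm the timer if frames are still
 * pending in any hold queue.
 */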
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
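
/*
 * Tear down reorder state for one TID: flush anything still held, free
 * the hold queue and reset the per-TID statistics.
 */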
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
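
/*
 * Handle an ADDBA request event from the target: (re)initialize the
 * reorder window for the signalled TID with the negotiated window size
 * and starting sequence number.
 */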
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}