txrx.c
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
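
/*
 * Worked example of the tid_mux encoding above (added for illustration,
 * not in the original source): tid_mux = 0x35 decodes to
 * tid = 0x35 & ATH6KL_TID_MASK = 5 and aid = 0x35 >> ATH6KL_AID_SHIFT = 3,
 * i.e. traffic class 5 of the station with association ID 3.
 */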

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}
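
	/*
	 * Added note (assumption, not from the original source):
	 * is_ethertype() distinguishes DIX from 802.3 framing by the value
	 * of h_proto; values of 0x0600 and above are EtherTypes, smaller
	 * values are 802.3 length fields, in which case the LLC/SNAP
	 * header that follows carries the real EtherType.
	 */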

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);

	return true;
}
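
/*
 * Added summary (not in the original source): ath6kl_powersave_ap()
 * implements AP-side power-save buffering. It returns true when the skb
 * has been consumed (queued for a sleeping station, or dropped because no
 * station entry was found), in which case the caller must not transmit
 * it; false means the frame should go out on the air as usual, possibly
 * with MoreData/EOSP bits set in *flags.
 */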

static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry
			 * q it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}

	return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
		return -EACCES;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}
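
		/*
		 * Added note (not in the original source): skb->csum_start
		 * is an offset from skb->head, so subtracting the network
		 * header offset rebases it to the start of the IP header;
		 * adding the LLC/SNAP header length accounts for the
		 * DIX-to-802.3 conversion done below, so the offsets match
		 * the frame the target will actually see.
		 */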

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since
		 * we are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant: the host should not be exhausting
		 * the WMI queue with too many commands. The only exception
		 * to this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;

			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
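
		/*
		 * Added note (not in the original source): PTR_ALIGN()
		 * rounds up, so aligning (data - 4) to a 4-byte boundary
		 * effectively moves skb->data down by 1-3 bytes. E.g. data
		 * at ...0x1006 becomes PTR_ALIGN(0x1002, 4) = 0x1004,
		 * which stays within the buffer.
		 */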

		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);

		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;

		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);

		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of the A-MSDU subframe padding bytes -
		 * round up to the next 4-byte word boundary.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);
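		/*
		 * Illustrative example (added, not in the original source):
		 * a 47-byte subframe is padded to 48 bytes, so framep
		 * advances 48 bytes to the next subframe header.
		 */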
		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4095, there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose win_sz based on this
	 * condition (for 4095, TID_WINDOW_SZ = 2 x win_sz, and win_sz
	 * of 2, 4, 8 or 16 works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
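
	/*
	 * Added note (assumption, not from the original source):
	 * AGGR_WIN_IDX() is assumed to simply take the sequence number
	 * modulo the hold queue size, e.g. seq_next = 100 with
	 * hold_q_sz = 16 maps to slot 100 % 16 = 4 of hold_q.
	 */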

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;
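
	/*
	 * Worked example (added, not in the original source): with
	 * hold_q_sz = 8 and st = 4090 in the 0..4095 sequence space,
	 * end = (4090 + 7) & 4095 = 1, i.e. the window [4090, 1] wraps
	 * around, which is why both the st < end and st > end orderings
	 * are checked below.
	 */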

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
				     (rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the current frame a duplicate, or something beyond our
	 * window (hold_q -> which is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 * 2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *     -> drop the frame; perhaps the sender did not get our ACK.
	 *        This is taken care of above.
	 * 2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
	 *     -> Taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_conn->timer_scheduled = true;
				mod_timer(&agg_conn->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */
	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;

	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request.
	 * If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
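
	/*
	 * Added note (assumption, not from the original source): apsd_info
	 * mirrors the QoS Info field of the association request; the low
	 * ATH6KL_APSD_NUM_OF_AC bits flag the delivery-enabled ACs (as
	 * tested in ath6kl_process_uapsdq()), and the bits above them,
	 * selected by ATH6KL_APSD_FRAME_MASK, encode the maximum service
	 * period length extracted here.
	 */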

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out.
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;

				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else
			aggr_conn = vif->aggr_cntxt->aggr_conn;

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest))
		vif->net_stats.multicast++;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
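
	/*
	 * Added note (grounded in the comment in aggr_deque_frms() above):
	 * TID_WINDOW_SZ(win_sz) is twice the negotiated BlockAck window,
	 * so e.g. win_sz = 8 allocates a 16-entry hold_q, leaving room to
	 * detect frames that land beyond the current window.
	 */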
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);

	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}