/* 11n_aggr.c */
/*
 * Marvell Wireless LAN device driver: 802.11n Aggregation
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */
  19. #include "decl.h"
  20. #include "ioctl.h"
  21. #include "util.h"
  22. #include "fw.h"
  23. #include "main.h"
  24. #include "wmm.h"
  25. #include "11n.h"
  26. #include "11n_aggr.h"
  27. /*
  28. * Creates an AMSDU subframe for aggregation into one AMSDU packet.
  29. *
  30. * The resultant AMSDU subframe format is -
  31. *
  32. * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
  33. * | DA | SA | Length | SNAP header | MSDU |
  34. * | data[0..5] | data[6..11] | | | data[14..] |
  35. * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
  36. * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
  37. *
  38. * This function also computes the amount of padding required to make the
  39. * buffer length multiple of 4 bytes.
  40. *
  41. * Data => |DA|SA|SNAP-TYPE|........ .|
  42. * MSDU => |DA|SA|Length|SNAP|...... ..|
  43. */
  44. static int
  45. mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
  46. struct sk_buff *skb_src, int *pad)
  47. {
  48. int dt_offset;
  49. struct rfc_1042_hdr snap = {
  50. 0xaa, /* LLC DSAP */
  51. 0xaa, /* LLC SSAP */
  52. 0x03, /* LLC CTRL */
  53. {0x00, 0x00, 0x00}, /* SNAP OUI */
  54. 0x0000 /* SNAP type */
  55. /*
  56. * This field will be overwritten
  57. * later with ethertype
  58. */
  59. };
  60. struct tx_packet_hdr *tx_header = NULL;
  61. skb_put(skb_aggr, sizeof(*tx_header));
  62. tx_header = (struct tx_packet_hdr *) skb_aggr->data;
  63. /* Copy DA and SA */
  64. dt_offset = 2 * ETH_ALEN;
  65. memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
  66. /* Copy SNAP header */
  67. snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
  68. dt_offset += sizeof(u16);
  69. memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
  70. skb_pull(skb_src, dt_offset);
  71. /* Update Length field */
  72. tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
  73. /* Add payload */
  74. skb_put(skb_aggr, skb_src->len);
  75. memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
  76. skb_src->len);
  77. *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
  78. LLC_SNAP_LEN)) & 3)) : 0;
  79. skb_put(skb_aggr, *pad);
  80. return skb_aggr->len + *pad;
  81. }
  82. /*
  83. * Adds TxPD to AMSDU header.
  84. *
  85. * Each AMSDU packet will contain one TxPD at the beginning,
  86. * followed by multiple AMSDU subframes.
  87. */
  88. static void
  89. mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
  90. struct sk_buff *skb)
  91. {
  92. struct txpd *local_tx_pd;
  93. skb_push(skb, sizeof(*local_tx_pd));
  94. local_tx_pd = (struct txpd *) skb->data;
  95. memset(local_tx_pd, 0, sizeof(struct txpd));
  96. /* Original priority has been overwritten */
  97. local_tx_pd->priority = (u8) skb->priority;
  98. local_tx_pd->pkt_delay_2ms =
  99. mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
  100. local_tx_pd->bss_num = priv->bss_num;
  101. local_tx_pd->bss_type = priv->bss_type;
  102. /* Always zero as the data is followed by struct txpd */
  103. local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
  104. local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
  105. local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
  106. sizeof(*local_tx_pd));
  107. if (local_tx_pd->tx_control == 0)
  108. /* TxCtrl set by user or default */
  109. local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
  110. if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
  111. (priv->adapter->pps_uapsd_mode)) {
  112. if (true == mwifiex_check_last_packet_indication(priv)) {
  113. priv->adapter->tx_lock_flag = true;
  114. local_tx_pd->flags =
  115. MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
  116. }
  117. }
  118. }
  119. /*
  120. * Counts the number of subframes in an aggregate packet.
  121. *
  122. * This function parses an aggregate packet buffer, looking for
  123. * subframes and counting the number of such subframe found. The
  124. * function automatically skips the DA/SA fields at the beginning
  125. * of each subframe and padding at the end.
  126. */
  127. static int
  128. mwifiex_11n_get_num_aggr_pkts(u8 *data, int total_pkt_len)
  129. {
  130. int pkt_count = 0, pkt_len, pad;
  131. while (total_pkt_len > 0) {
  132. /* Length will be in network format, change it to host */
  133. pkt_len = ntohs((*(__be16 *)(data + 2 * ETH_ALEN)));
  134. pad = (((pkt_len + sizeof(struct ethhdr)) & 3)) ?
  135. (4 - ((pkt_len + sizeof(struct ethhdr)) & 3)) : 0;
  136. data += pkt_len + pad + sizeof(struct ethhdr);
  137. total_pkt_len -= pkt_len + pad + sizeof(struct ethhdr);
  138. ++pkt_count;
  139. }
  140. return pkt_count;
  141. }
  142. /*
  143. * De-aggregate received packets.
  144. *
  145. * This function parses the received aggregate buffer, extracts each subframe,
  146. * strips off the SNAP header from them and sends the data portion for further
  147. * processing.
  148. *
  149. * Each subframe body is copied onto a separate buffer, which are freed by
  150. * upper layer after processing. The function also performs sanity tests on
  151. * the received buffer.
  152. */
  153. int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
  154. struct sk_buff *skb)
  155. {
  156. u16 pkt_len;
  157. int total_pkt_len;
  158. u8 *data;
  159. int pad;
  160. struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
  161. struct rxpd *local_rx_pd = (struct rxpd *) skb->data;
  162. struct sk_buff *skb_daggr;
  163. struct mwifiex_rxinfo *rx_info_daggr = NULL;
  164. int ret = -1;
  165. struct rx_packet_hdr *rx_pkt_hdr;
  166. struct mwifiex_adapter *adapter = priv->adapter;
  167. u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
  168. data = (u8 *) (local_rx_pd + local_rx_pd->rx_pkt_offset);
  169. total_pkt_len = local_rx_pd->rx_pkt_length;
  170. /* Sanity test */
  171. if (total_pkt_len > MWIFIEX_RX_DATA_BUF_SIZE) {
  172. dev_err(adapter->dev, "total pkt len greater than buffer"
  173. " size %d\n", total_pkt_len);
  174. return -1;
  175. }
  176. rx_info->use_count = mwifiex_11n_get_num_aggr_pkts(data, total_pkt_len);
  177. while (total_pkt_len > 0) {
  178. rx_pkt_hdr = (struct rx_packet_hdr *) data;
  179. /* Length will be in network format, change it to host */
  180. pkt_len = ntohs((*(__be16 *) (data + 2 * ETH_ALEN)));
  181. if (pkt_len > total_pkt_len) {
  182. dev_err(adapter->dev, "pkt_len %d > total_pkt_len %d\n",
  183. total_pkt_len, pkt_len);
  184. break;
  185. }
  186. pad = (((pkt_len + sizeof(struct ethhdr)) & 3)) ?
  187. (4 - ((pkt_len + sizeof(struct ethhdr)) & 3)) : 0;
  188. total_pkt_len -= pkt_len + pad + sizeof(struct ethhdr);
  189. if (memcmp(&rx_pkt_hdr->rfc1042_hdr,
  190. rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
  191. memmove(data + LLC_SNAP_LEN, data, 2 * ETH_ALEN);
  192. data += LLC_SNAP_LEN;
  193. pkt_len += sizeof(struct ethhdr) - LLC_SNAP_LEN;
  194. } else {
  195. *(u16 *) (data + 2 * ETH_ALEN) = (u16) 0;
  196. pkt_len += sizeof(struct ethhdr);
  197. }
  198. skb_daggr = dev_alloc_skb(pkt_len);
  199. if (!skb_daggr) {
  200. dev_err(adapter->dev, "%s: failed to alloc skb_daggr\n",
  201. __func__);
  202. return -1;
  203. }
  204. rx_info_daggr = MWIFIEX_SKB_RXCB(skb_daggr);
  205. rx_info_daggr->bss_index = rx_info->bss_index;
  206. skb_daggr->tstamp = skb->tstamp;
  207. rx_info_daggr->parent = skb;
  208. skb_daggr->priority = skb->priority;
  209. skb_put(skb_daggr, pkt_len);
  210. memcpy(skb_daggr->data, data, pkt_len);
  211. ret = mwifiex_recv_packet(adapter, skb_daggr);
  212. switch (ret) {
  213. case -EINPROGRESS:
  214. break;
  215. case -1:
  216. dev_err(adapter->dev, "deaggr: host_to_card failed\n");
  217. case 0:
  218. mwifiex_recv_packet_complete(adapter, skb_daggr, ret);
  219. break;
  220. default:
  221. break;
  222. }
  223. data += pkt_len + pad;
  224. }
  225. return ret;
  226. }
/*
 * Create aggregated packet.
 *
 * This function creates an aggregated MSDU packet, by combining buffers
 * from the RA list. Each individual buffer is encapsulated as an AMSDU
 * subframe and all such subframes are concatenated together to form the
 * AMSDU packet.
 *
 * A TxPD is also added to the front of the resultant AMSDU packets for
 * transmission. The resultant packets format is -
 *
 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
 * |    TxPD   |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
 * |           |       1       |       2       | .. |       n       |
 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
 *
 * Called with priv->wmm.ra_list_spinlock held; the lock is always
 * released before returning (see __releases below).  Returns 0 on
 * success or when there was nothing to send, -1 on failure.
 */
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
			  struct mwifiex_ra_list_tbl *pra_list, int headroom,
			  int ptrindex, unsigned long ra_list_flags)
			__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb_aggr, *skb_src;
	struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
	int pad = 0;
	int ret = 0;
	struct mwifiex_tx_param tx_param;
	struct txpd *ptx_pd = NULL;

	/* Nothing queued for this RA: drop the lock and report success */
	if (skb_queue_empty(&pra_list->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return 0;
	}
	skb_src = skb_peek(&pra_list->skb_head);
	tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
	skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
	if (!skb_aggr) {
		dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return -1;
	}
	/* Reserve bus headroom plus room for the TxPD pushed later by
	 * mwifiex_11n_form_amsdu_txpd() */
	skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
	tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
	/* Aggregate inherits BSS and priority from its first MSDU */
	tx_info_aggr->bss_index = tx_info_src->bss_index;
	skb_aggr->priority = skb_src->priority;
	/*
	 * NOTE(review): this size check uses skb_headroom(), which stays
	 * constant, not the growing skb_aggr->len — it only proves a
	 * single subframe fits, not the running aggregate.  Confirm
	 * against tx_buf_size sizing before relying on it.
	 */
	while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
			    + LLC_SNAP_LEN)
			   <= adapter->tx_buf_size)) {
		/*
		 * NOTE(review): the else branch is unreachable — skb_src
		 * was peeked from this queue under the lock, so the queue
		 * cannot be empty here; the unconditional skb_src->len
		 * below relies on that.
		 */
		if (!skb_queue_empty(&pra_list->skb_head))
			skb_src = skb_dequeue(&pra_list->skb_head);
		else
			skb_src = NULL;
		pra_list->total_pkts_size -= skb_src->len;
		/* Drop the lock while copying/completing this MSDU */
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
		mwifiex_write_data_complete(adapter, skb_src, 0);
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		/* The RA list may have been deleted while unlocked */
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			return -1;
		}
		/* Peek the next MSDU (if any) for the loop condition */
		if (!skb_queue_empty(&pra_list->skb_head))
			skb_src = skb_peek(&pra_list->skb_head);
		else
			skb_src = NULL;
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
	/* Last AMSDU packet does not need padding */
	skb_trim(skb_aggr, skb_aggr->len - pad);
	/* Form AMSDU */
	mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
	/* Keep a pointer to the TxPD so the EBUSY path can clear its
	 * power-management flags before requeueing */
	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
		ptx_pd = (struct txpd *)skb_aggr->data;
	skb_push(skb_aggr, headroom);
	/* Hint the bus driver about the size of the next pending packet */
	tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
				 (((pra_list->total_pkts_size) >
				   adapter->tx_buf_size) ? adapter->
				  tx_buf_size : pra_list->total_pkts_size +
				  LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
					   skb_aggr->data,
					   skb_aggr->len, &tx_param);
	switch (ret) {
	case -EBUSY:
		/* Bus busy: requeue the whole aggregate on the RA list */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb_aggr, -1);
			return -1;
		}
		/* Undo the last-packet power-save indication set in
		 * mwifiex_11n_form_amsdu_txpd(), since nothing was sent */
		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
		    (adapter->pps_uapsd_mode) &&
		    (adapter->tx_lock_flag)) {
			priv->adapter->tx_lock_flag = false;
			ptx_pd->flags = 0;
		}
		skb_queue_tail(&pra_list->skb_head, skb_aggr);
		pra_list->total_pkts_size += skb_aggr->len;
		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		break;
	case -1:
		adapter->data_sent = false;
		dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
			__func__, ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb_aggr, ret);
		return 0;
	case -EINPROGRESS:
		/* Bus driver owns the buffer; completion comes later */
		adapter->data_sent = false;
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb_aggr, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY) {
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			priv->wmm.packets_out[ptrindex]++;
			priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
		}
		/* Now bss_prio_cur pointer points to next node */
		adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
			list_first_entry(
				&adapter->bss_prio_tbl[priv->bss_priority]
				.bss_prio_cur->list,
				struct mwifiex_bss_prio_node, list);
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	}
	return 0;
}