/*
 * Common code for mac80211 Prism54 drivers
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * Based on:
 * - the islsm (softmac prism54) driver, which is:
 *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
 * - stlc45xx driver
 *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>

#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#ifdef P54_MM_DEBUG
/*
 * Dump the device tx_queue to the kernel log: one line per queued
 * frame showing the skb, the p54_hdr fields and the chunk of firmware
 * memory (start/end) the frame was assigned, plus the free gap in
 * front of it.  Ends with the free space at the tail of the window
 * and the largest single free block seen.
 *
 * Walks the queue under tx_queue.lock, so callers must not already
 * hold that lock.  Compiled only when P54_MM_DEBUG is defined.
 */
static void p54_dump_tx_queue(struct p54_common *priv)
{
	unsigned long flags;
	struct ieee80211_tx_info *info;
	struct p54_tx_info *range;
	struct sk_buff *skb;
	struct p54_hdr *hdr;
	unsigned int i = 0;
	u32 prev_addr;
	u32 largest_hole = 0, free;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
	       wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));

	prev_addr = priv->rx_start;
	skb_queue_walk(&priv->tx_queue, skb) {
		info = IEEE80211_SKB_CB(skb);
		range = (void *) info->rate_driver_data;
		hdr = (void *) skb->data;

		/* gap between the previous frame's end and this one's start */
		free = range->start_addr - prev_addr;
		printk(KERN_DEBUG "%s: | [%02d] => [skb:%p skb_len:0x%04x "
		       "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} "
		       "mem:{start:%04x end:%04x, free:%d}]\n",
		       wiphy_name(priv->hw->wiphy), i++, skb, skb->len,
		       le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len),
		       le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type),
		       range->start_addr, range->end_addr, free);
		prev_addr = range->end_addr;
		largest_hole = max(largest_hole, free);
	}
	/* account for the remaining space up to the end of the window */
	free = priv->rx_end - prev_addr;
	largest_hole = max(largest_hole, free);
	printk(KERN_DEBUG "%s: \\ --- [free: %d], largest free block: %d ---\n",
	       wiphy_name(priv->hw->wiphy), free, largest_hole);
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
}
#endif /* P54_MM_DEBUG */
/*
 * So, the firmware is somewhat stupid and doesn't know what places in its
 * memory incoming data should go to. By poking around in the firmware, we
 * can find some unused memory to upload our packets to. However, data that we
 * want the card to TX needs to stay intact until the card has told us that
 * it is done with it. This function finds empty places we can upload to and
 * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or
 * p54_free_skb frees allocated areas.
 */
  70. static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
  71. {
  72. struct sk_buff *entry, *target_skb = NULL;
  73. struct ieee80211_tx_info *info;
  74. struct p54_tx_info *range;
  75. struct p54_hdr *data = (void *) skb->data;
  76. unsigned long flags;
  77. u32 last_addr = priv->rx_start;
  78. u32 target_addr = priv->rx_start;
  79. u16 len = priv->headroom + skb->len + priv->tailroom + 3;
  80. if (unlikely(WARN_ON(!skb || !priv)))
  81. return -EINVAL;
  82. info = IEEE80211_SKB_CB(skb);
  83. range = (void *) info->rate_driver_data;
  84. len = (range->extra_len + len) & ~0x3;
  85. spin_lock_irqsave(&priv->tx_queue.lock, flags);
  86. if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) {
  87. /*
  88. * The tx_queue is now really full.
  89. *
  90. * TODO: check if the device has crashed and reset it.
  91. */
  92. spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
  93. return -EBUSY;
  94. }
  95. skb_queue_walk(&priv->tx_queue, entry) {
  96. u32 hole_size;
  97. info = IEEE80211_SKB_CB(entry);
  98. range = (void *) info->rate_driver_data;
  99. hole_size = range->start_addr - last_addr;
  100. if (!entry->next) {
  101. spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
  102. return -ENOSPC;
  103. }
  104. if (!target_skb && hole_size >= len) {
  105. target_skb = entry->prev;
  106. hole_size -= len;
  107. target_addr = last_addr;
  108. break;
  109. }
  110. last_addr = range->end_addr;
  111. }
  112. if (unlikely(!target_skb)) {
  113. if (priv->rx_end - last_addr >= len) {
  114. target_skb = priv->tx_queue.prev;
  115. if (!skb_queue_empty(&priv->tx_queue)) {
  116. info = IEEE80211_SKB_CB(target_skb);
  117. range = (void *)info->rate_driver_data;
  118. target_addr = range->end_addr;
  119. }
  120. } else {
  121. spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
  122. return -ENOSPC;
  123. }
  124. }
  125. info = IEEE80211_SKB_CB(skb);
  126. range = (void *) info->rate_driver_data;
  127. range->start_addr = target_addr;
  128. range->end_addr = target_addr + len;
  129. __skb_queue_after(&priv->tx_queue, target_skb, skb);
  130. spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
  131. data->req_id = cpu_to_le32(target_addr + priv->headroom);
  132. return 0;
  133. }
  134. static void p54_tx_pending(struct p54_common *priv)
  135. {
  136. struct sk_buff *skb;
  137. int ret;
  138. if (unlikely(WARN_ON(!priv)))
  139. return ;
  140. skb = skb_dequeue(&priv->tx_pending);
  141. if (unlikely(!skb))
  142. return ;
  143. ret = p54_assign_address(priv, skb);
  144. if (unlikely(ret))
  145. skb_queue_head(&priv->tx_pending, skb);
  146. else
  147. priv->tx(priv->hw, skb);
  148. }
  149. static void p54_wake_queues(struct p54_common *priv)
  150. {
  151. unsigned long flags;
  152. unsigned int i;
  153. if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
  154. return ;
  155. p54_tx_pending(priv);
  156. spin_lock_irqsave(&priv->tx_stats_lock, flags);
  157. for (i = 0; i < priv->hw->queues; i++) {
  158. if (priv->tx_stats[i + P54_QUEUE_DATA].len <
  159. priv->tx_stats[i + P54_QUEUE_DATA].limit)
  160. ieee80211_wake_queue(priv->hw, i);
  161. }
  162. spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
  163. }
  164. static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
  165. struct sk_buff *skb,
  166. const u16 p54_queue)
  167. {
  168. struct ieee80211_tx_queue_stats *queue;
  169. unsigned long flags;
  170. if (WARN_ON(p54_queue > P54_QUEUE_NUM))
  171. return -EINVAL;
  172. queue = &priv->tx_stats[p54_queue];
  173. spin_lock_irqsave(&priv->tx_stats_lock, flags);
  174. if (unlikely(queue->len > queue->limit && IS_QOS_QUEUE(p54_queue))) {
  175. spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
  176. return -ENOSPC;
  177. }
  178. queue->len++;
  179. queue->count++;
  180. if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) {
  181. u16 ac_queue = p54_queue - P54_QUEUE_DATA;
  182. ieee80211_stop_queue(priv->hw, ac_queue);
  183. }
  184. spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
  185. return 0;
  186. }
  187. static void p54_tx_qos_accounting_free(struct p54_common *priv,
  188. struct sk_buff *skb)
  189. {
  190. if (skb && IS_DATA_FRAME(skb)) {
  191. struct p54_hdr *hdr = (void *) skb->data;
  192. struct p54_tx_data *data = (void *) hdr->data;
  193. priv->tx_stats[data->hw_queue].len--;
  194. }
  195. p54_wake_queues(priv);
  196. }
  197. void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
  198. {
  199. struct p54_common *priv = dev->priv;
  200. if (unlikely(!skb))
  201. return ;
  202. skb_unlink(skb, &priv->tx_queue);
  203. p54_tx_qos_accounting_free(priv, skb);
  204. dev_kfree_skb_any(skb);
  205. }
  206. EXPORT_SYMBOL_GPL(p54_free_skb);
/*
 * Find the queued tx frame whose p54_hdr::req_id matches @req_id,
 * unlink it from priv->tx_queue (which also frees its firmware memory
 * reservation) and release its queue-accounting slot.
 *
 * Returns the unlinked skb — ownership passes to the caller, who must
 * free it — or NULL when no queued frame carries @req_id.
 */
static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv,
					       const __le32 req_id)
{
	struct sk_buff *entry;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk(&priv->tx_queue, entry) {
		struct p54_hdr *hdr = (struct p54_hdr *) entry->data;

		if (hdr->req_id == req_id) {
			__skb_unlink(entry, &priv->tx_queue);
			/*
			 * drop the lock before the accounting call:
			 * it may end up waking queues / re-entering tx.
			 */
			spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
			p54_tx_qos_accounting_free(priv, entry);
			return entry;
		}
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
	return NULL;
}
  225. void p54_tx(struct p54_common *priv, struct sk_buff *skb)
  226. {
  227. if (unlikely(WARN_ON(!priv)))
  228. return ;
  229. skb_queue_tail(&priv->tx_pending, skb);
  230. p54_tx_pending(priv);
  231. }
  232. static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
  233. {
  234. int band = priv->hw->conf.channel->band;
  235. if (priv->rxhw != 5)
  236. return ((rssi * priv->rssical_db[band].mul) / 64 +
  237. priv->rssical_db[band].add) / 4;
  238. else
  239. /*
  240. * TODO: find the correct formula
  241. */
  242. return ((rssi * priv->rssical_db[band].mul) / 64 +
  243. priv->rssical_db[band].add) / 4;
  244. }
/*
 * Hand a received data frame to mac80211.
 *
 * Translates the proprietary p54_rx_data header into an
 * ieee80211_rx_status (signal, rate index, frequency, 64-bit TSF,
 * crypto status), strips the device header and schedules the
 * periodic statistics work.
 *
 * Returns -1 when the skb was consumed (passed up to mac80211) and 0
 * when the caller may reuse the skb (frame was dropped).
 */
static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	u16 freq = le16_to_cpu(hdr->freq);
	size_t header_len = sizeof(*hdr);
	u32 tsf32;
	u8 rate = hdr->rate & 0xf;

	/*
	 * If the device is in a unspecified state we have to
	 * ignore all data frames. Else we could end up with a
	 * nasty crash.
	 */
	if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return 0;

	/* drop frames the hardware flagged with a bad FCS */
	if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD)))
		return 0;

	if (hdr->decrypt_status == P54_DECRYPT_OK)
		rx_status->flag |= RX_FLAG_DECRYPTED;
	if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) ||
	    (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP))
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
	rx_status->noise = priv->noise;
	if (hdr->rate & 0x10)
		rx_status->flag |= RX_FLAG_SHORTPRE;
	/*
	 * on 5 GHz the hardware rate index still counts the four
	 * 2.4 GHz-only CCK entries, so shift it down by 4
	 */
	if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
		rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
	else
		rx_status->rate_idx = rate;

	rx_status->freq = freq;
	rx_status->band = priv->hw->conf.channel->band;
	rx_status->antenna = hdr->antenna;

	/* extend the device's 32-bit TSF to 64 bit for mactime */
	tsf32 = le32_to_cpu(hdr->tsf32);
	if (tsf32 < priv->tsf_low32)
		priv->tsf_high32++;
	rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
	priv->tsf_low32 = tsf32;
	rx_status->flag |= RX_FLAG_TSFT;

	/* skip the alignment padding the device may have inserted */
	if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
		header_len += hdr->align[0];

	skb_pull(skb, header_len);
	skb_trim(skb, le16_to_cpu(hdr->len));
	ieee80211_rx_irqsafe(priv->hw, skb);

	queue_delayed_work(priv->hw->workqueue, &priv->work,
			   msecs_to_jiffies(P54_STATISTICS_UPDATE));

	return -1;
}
/*
 * Process a TXDONE control frame: look up (and unlink) the tx frame
 * the firmware just finished with, fill in the mac80211 tx status
 * (per-rate retry counts, ACK flag, ack_signal), undo the
 * crypto-related modifications p54_tx_80211 made to the frame and
 * hand it back to mac80211.
 */
static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
	struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
	struct ieee80211_tx_info *info;
	struct p54_hdr *entry_hdr;
	struct p54_tx_data *entry_data;
	struct sk_buff *entry;
	unsigned int pad = 0, frame_len;
	int count, idx;

	entry = p54_find_and_unlink_skb(priv, hdr->req_id);
	if (unlikely(!entry))
		return ;

	frame_len = entry->len;
	info = IEEE80211_SKB_CB(entry);
	entry_hdr = (struct p54_hdr *) entry->data;
	entry_data = (struct p54_tx_data *) entry_hdr->data;
	priv->stats.dot11ACKFailureCount += payload->tries - 1;

	/*
	 * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
	 * generated by the driver. Therefore tx_status is bogus
	 * and we don't want to confuse the mac80211 stack.
	 */
	if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) {
		if (entry_data->hw_queue == P54_QUEUE_BEACON &&
		    hdr->req_id == priv->beacon_req_id)
			priv->beacon_req_id = cpu_to_le32(0);
		dev_kfree_skb_any(entry);
		return ;
	}

	/*
	 * Clear manually, ieee80211_tx_info_clear_status would
	 * clear the counts too and we need them.
	 */
	memset(&info->status.ampdu_ack_len, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
			      status.ampdu_ack_len) != 23);

	if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
		pad = entry_data->align[0];

	/* walk through the rates array and adjust the counts */
	count = payload->tries;
	for (idx = 0; idx < 4; idx++) {
		if (count >= info->status.rates[idx].count) {
			/* this rate slot was fully used up */
			count -= info->status.rates[idx].count;
		} else if (count > 0) {
			/* the last slot actually tried */
			info->status.rates[idx].count = count;
			count = 0;
		} else {
			/* slot never reached: mark it unused */
			info->status.rates[idx].idx = -1;
			info->status.rates[idx].count = 0;
		}
	}

	/* status == 0 means the frame was acknowledged */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    (!payload->status))
		info->flags |= IEEE80211_TX_STAT_ACK;
	if (payload->status & P54_TX_PSM_CANCELLED)
		info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	info->status.ack_signal = p54_rssi_to_dbm(priv,
						  (int)payload->ack_rssi);

	/* Undo all changes to the frame. */
	switch (entry_data->key_type) {
	case P54_CRYPTO_TKIPMICHAEL: {
		u8 *iv = (u8 *)(entry_data->align + pad +
				entry_data->crypt_offset);

		/* Restore the original TKIP IV. */
		iv[2] = iv[0];
		iv[0] = iv[1];
		iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */

		frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
		break;
	}
	case P54_CRYPTO_AESCCMP:
		frame_len -= 8; /* remove CCMP_MIC */
		break;
	case P54_CRYPTO_WEP:
		frame_len -= 4; /* remove WEP_ICV */
		break;
	}

	/* strip the device headers and crypto trailer before reporting */
	skb_trim(entry, frame_len);
	skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
	ieee80211_tx_status_irqsafe(priv->hw, entry);
}
  377. static void p54_rx_eeprom_readback(struct p54_common *priv,
  378. struct sk_buff *skb)
  379. {
  380. struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
  381. struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
  382. struct sk_buff *tmp;
  383. if (!priv->eeprom)
  384. return ;
  385. if (priv->fw_var >= 0x509) {
  386. memcpy(priv->eeprom, eeprom->v2.data,
  387. le16_to_cpu(eeprom->v2.len));
  388. } else {
  389. memcpy(priv->eeprom, eeprom->v1.data,
  390. le16_to_cpu(eeprom->v1.len));
  391. }
  392. priv->eeprom = NULL;
  393. tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
  394. p54_tx_qos_accounting_free(priv, tmp);
  395. dev_kfree_skb_any(tmp);
  396. complete(&priv->eeprom_comp);
  397. }
  398. static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
  399. {
  400. struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
  401. struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
  402. struct sk_buff *tmp;
  403. u32 tsf32;
  404. if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
  405. return ;
  406. tsf32 = le32_to_cpu(stats->tsf32);
  407. if (tsf32 < priv->tsf_low32)
  408. priv->tsf_high32++;
  409. priv->tsf_low32 = tsf32;
  410. priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
  411. priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
  412. priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
  413. priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise));
  414. tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
  415. p54_tx_qos_accounting_free(priv, tmp);
  416. dev_kfree_skb_any(tmp);
  417. }
  418. static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
  419. {
  420. struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
  421. struct p54_trap *trap = (struct p54_trap *) hdr->data;
  422. u16 event = le16_to_cpu(trap->event);
  423. u16 freq = le16_to_cpu(trap->frequency);
  424. switch (event) {
  425. case P54_TRAP_BEACON_TX:
  426. break;
  427. case P54_TRAP_RADAR:
  428. printk(KERN_INFO "%s: radar (freq:%d MHz)\n",
  429. wiphy_name(priv->hw->wiphy), freq);
  430. break;
  431. case P54_TRAP_NO_BEACON:
  432. if (priv->vif)
  433. ieee80211_beacon_loss(priv->vif);
  434. break;
  435. case P54_TRAP_SCAN:
  436. break;
  437. case P54_TRAP_TBTT:
  438. break;
  439. case P54_TRAP_TIMER:
  440. break;
  441. default:
  442. printk(KERN_INFO "%s: received event:%x freq:%d\n",
  443. wiphy_name(priv->hw->wiphy), event, freq);
  444. break;
  445. }
  446. }
  447. static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
  448. {
  449. struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
  450. switch (le16_to_cpu(hdr->type)) {
  451. case P54_CONTROL_TYPE_TXDONE:
  452. p54_rx_frame_sent(priv, skb);
  453. break;
  454. case P54_CONTROL_TYPE_TRAP:
  455. p54_rx_trap(priv, skb);
  456. break;
  457. case P54_CONTROL_TYPE_BBP:
  458. break;
  459. case P54_CONTROL_TYPE_STAT_READBACK:
  460. p54_rx_stats(priv, skb);
  461. break;
  462. case P54_CONTROL_TYPE_EEPROM_READBACK:
  463. p54_rx_eeprom_readback(priv, skb);
  464. break;
  465. default:
  466. printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n",
  467. wiphy_name(priv->hw->wiphy), le16_to_cpu(hdr->type));
  468. break;
  469. }
  470. return 0;
  471. }
  472. /* returns zero if skb can be reused */
  473. int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
  474. {
  475. struct p54_common *priv = dev->priv;
  476. u16 type = le16_to_cpu(*((__le16 *)skb->data));
  477. if (type & P54_HDR_FLAG_CONTROL)
  478. return p54_rx_control(priv, skb);
  479. else
  480. return p54_rx_data(priv, skb);
  481. }
  482. EXPORT_SYMBOL_GPL(p54_rx);
/*
 * Derive the p54 tx parameters for an outgoing 802.11 frame from the
 * interface mode and the frame type: hardware queue, extra tail space,
 * p54 header flags, association id and whether frame bursting is
 * possible.  All results are returned through the out parameters.
 */
static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
				struct ieee80211_tx_info *info, u8 *queue,
				u32 *extra_len, u16 *flags, u16 *aid,
				bool *burst_possible)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* only QoS data frames are candidates for bursting */
	if (ieee80211_is_data_qos(hdr->frame_control))
		*burst_possible = true;
	else
		*burst_possible = false;

	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;

	if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;

	/* default: map mac80211's ac queue onto the p54 data queues */
	*queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;

	switch (priv->mode) {
	case NL80211_IFTYPE_MONITOR:
		/*
		 * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for
		 * every frame in promiscuous/monitor mode.
		 * see STSW45x0C LMAC API - page 12.
		 */
		*aid = 0;
		*flags |= P54_HDR_FLAG_DATA_OUT_PROMISC;
		break;
	case NL80211_IFTYPE_STATION:
		*aid = 1;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		/* frames for after-DTIM delivery go to the CAB queue */
		if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
			*aid = 0;
			*queue = P54_QUEUE_CAB;
			return;
		}

		if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
			if (ieee80211_is_probe_resp(hdr->frame_control)) {
				*aid = 0;
				*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
					  P54_HDR_FLAG_DATA_OUT_NOCANCEL;
				return;
			} else if (ieee80211_is_beacon(hdr->frame_control)) {
				*aid = 0;

				if (info->flags & IEEE80211_TX_CTL_INJECTED) {
					/*
					 * Injecting beacons on top of a AP is
					 * not a good idea... nevertheless,
					 * it should be doable.
					 */
					return;
				}

				*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
				*queue = P54_QUEUE_BEACON;
				/* room for the driver-maintained TIM IE */
				*extra_len = IEEE80211_MAX_TIM_LEN;
				return;
			}
		}

		if (info->control.sta)
			*aid = info->control.sta->aid;
		break;
	}
}
  546. static u8 p54_convert_algo(enum ieee80211_key_alg alg)
  547. {
  548. switch (alg) {
  549. case ALG_WEP:
  550. return P54_CRYPTO_WEP;
  551. case ALG_TKIP:
  552. return P54_CRYPTO_TKIPMICHAEL;
  553. case ALG_CCMP:
  554. return P54_CRYPTO_AESCCMP;
  555. default:
  556. return 0;
  557. }
  558. }
/*
 * mac80211 tx entry point: build the proprietary p54_hdr/p54_tx_data
 * headers in front of the 802.11 frame, translate the rate-control
 * information into the firmware's 8-entry rateset, append key material
 * plus MIC/ICV reservations for hardware crypto and finally hand the
 * frame to p54_tx for upload.
 *
 * Returns NETDEV_TX_OK (frame consumed or dropped) or NETDEV_TX_BUSY
 * when the target qos queue is full and mac80211 should retry later.
 */
int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct p54_common *priv = dev->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct p54_tx_info *p54info;
	struct p54_hdr *hdr;
	struct p54_tx_data *txhdr;
	unsigned int padding, len, extra_len;
	int i, j, ridx;
	u16 hdr_flags = 0, aid = 0;
	u8 rate, queue = 0, crypt_offset = 0;
	u8 cts_rate = 0x20;
	u8 rc_flags;
	u8 calculated_tries[4];
	u8 nrates = 0, nremaining = 8;
	bool burst_allowed = false;

	p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
			    &hdr_flags, &aid, &burst_allowed);

	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
		/* non-qos frames are dropped; qos frames make mac80211 retry */
		if (!IS_QOS_QUEUE(queue)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		} else {
			return NETDEV_TX_BUSY;
		}
	}

	/* padding needed so the pushed p54 headers end up 32-bit aligned */
	padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
	len = skb->len;

	if (info->control.hw_key) {
		crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
		if (info->control.hw_key->alg == ALG_TKIP) {
			u8 *iv = (u8 *)(skb->data + crypt_offset);
			/*
			 * The firmware excepts that the IV has to have
			 * this special format
			 */
			iv[1] = iv[0];
			iv[0] = iv[2];
			iv[2] = 0;
		}
	}

	txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding);
	hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr));

	if (padding)
		hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
	hdr->type = cpu_to_le16(aid);
	hdr->rts_tries = info->control.rates[0].count;

	/*
	 * we register the rates in perfect order, and
	 * RTS/CTS won't happen on 5 GHz
	 */
	cts_rate = info->control.rts_cts_rate_idx;

	memset(&txhdr->rateset, 0, sizeof(txhdr->rateset));

	/* see how many rates got used */
	for (i = 0; i < dev->max_rates; i++) {
		if (info->control.rates[i].idx < 0)
			break;
		nrates++;
	}

	/* limit tries to 8/nrates per rate */
	for (i = 0; i < nrates; i++) {
		/*
		 * The magic expression here is equivalent to 8/nrates for
		 * all values that matter, but avoids division and jumps.
		 * Note that nrates can only take the values 1 through 4.
		 */
		calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1,
					    info->control.rates[i].count);
		nremaining -= calculated_tries[i];
	}

	/* if there are tries left, distribute from back to front */
	for (i = nrates - 1; nremaining > 0 && i >= 0; i--) {
		int tmp = info->control.rates[i].count - calculated_tries[i];

		if (tmp <= 0)
			continue;
		/* RC requested more tries at this rate */

		tmp = min_t(int, tmp, nremaining);
		calculated_tries[i] += tmp;
		nremaining -= tmp;
	}

	ridx = 0;
	for (i = 0; i < nrates && ridx < 8; i++) {
		/* we register the rates in perfect order */
		rate = info->control.rates[i].idx;
		if (info->band == IEEE80211_BAND_5GHZ)
			rate += 4;

		/* store the count we actually calculated for TX status */
		info->control.rates[i].count = calculated_tries[i];

		rc_flags = info->control.rates[i].flags;
		if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) {
			rate |= 0x10;
			cts_rate |= 0x10;
		}
		if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			/* RTS/CTS protection is incompatible with bursting */
			burst_allowed = false;
			rate |= 0x40;
		} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			rate |= 0x20;
			burst_allowed = false;
		}
		/* replicate the rate entry once per allotted try */
		for (j = 0; j < calculated_tries[i] && ridx < 8; j++) {
			txhdr->rateset[ridx] = rate;
			ridx++;
		}
	}

	if (burst_allowed)
		hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST;

	/* TODO: enable bursting */
	hdr->flags = cpu_to_le16(hdr_flags);
	hdr->tries = ridx;
	txhdr->rts_rate_idx = 0;
	if (info->control.hw_key) {
		txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
		txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
		memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
		if (info->control.hw_key->alg == ALG_TKIP) {
			/* reserve space for the MIC key */
			len += 8;
			memcpy(skb_put(skb, 8), &(info->control.hw_key->key
				[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8);
		}
		/* reserve some space for ICV */
		len += info->control.hw_key->icv_len;
		memset(skb_put(skb, info->control.hw_key->icv_len), 0,
		       info->control.hw_key->icv_len);
	} else {
		txhdr->key_type = 0;
		txhdr->key_len = 0;
	}
	txhdr->crypt_offset = crypt_offset;
	txhdr->hw_queue = queue;
	txhdr->backlog = priv->tx_stats[queue].len - 1;
	memset(txhdr->durations, 0, sizeof(txhdr->durations));
	/* antenna_sel_tx == 0 selects diversity (2), else antenna idx - 1 */
	txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ?
		2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
	if (priv->rxhw == 5) {
		txhdr->longbow.cts_rate = cts_rate;
		txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
	} else {
		txhdr->normal.output_power = priv->output_power;
		txhdr->normal.cts_rate = cts_rate;
	}
	if (padding)
		txhdr->align[0] = padding;

	hdr->len = cpu_to_le16(len);
	/* modifies skb->cb and with it info, so must be last! */
	p54info = (void *) info->rate_driver_data;
	p54info->extra_len = extra_len;

	p54_tx(priv, skb);
	return NETDEV_TX_OK;
}