/*
        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;
        unsigned int frame_size;
        unsigned int head_size = 0;
        unsigned int tail_size = 0;

        /*
         * The frame size includes the descriptor size, because the
         * hardware directly receives the frame into the skbuffer.
         */
        frame_size = entry->queue->data_size + entry->queue->desc_size;

        /*
         * The payload should be aligned to a 4-byte boundary,
         * this means we need at least 3 bytes for moving the frame
         * into the correct offset.
         */
        head_size = 4;

        /*
         * For IV/EIV/ICV assembly we must make sure there are
         * at least 8 bytes available in the headroom for IV/EIV
         * and 8 bytes for ICV data as tailroom.
         */
        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
                head_size += 8;
                tail_size += 8;
        }

        /*
         * Allocate skbuffer.
         */
        skb = dev_alloc_skb(frame_size + head_size + tail_size);
        if (!skb)
                return NULL;

        /*
         * Reserve the requested headroom and claim the full frame
         * size as data, which leaves the tailroom available at the
         * end of the buffer.
         */
        skb_reserve(skb, head_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
                skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                  skb->data,
                                                  skb->len,
                                                  DMA_FROM_DEVICE);
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}

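/*
 * Map the skb attached to @entry for device DMA in the TX direction
 * and record the direction in the skbdesc flags, so that
 * rt2x00queue_unmap_skb() knows how to unmap it later.
 */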
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

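/*
 * Release the DMA mapping of the skb attached to @entry, using the
 * direction recorded in the skbdesc flags by the mapping helpers.
 */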
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

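/*
 * Unmap and free the skb attached to @entry, if any.
 */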
void rt2x00queue_free_skb(struct queue_entry *entry)
{
        if (!entry->skb)
                return;

        rt2x00queue_unmap_skb(entry);
        dev_kfree_skb_any(entry->skb);
        entry->skb = NULL;
}

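/*
 * Align the complete frame to a 4-byte boundary by moving it up
 * into the skb headroom.
 */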
void rt2x00queue_align_frame(struct sk_buff *skb)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, 0);

        if (!align)
                return;

        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}

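/*
 * Align the payload that follows the @header_length byte header to a
 * 4-byte boundary, by shifting the whole frame within its headroom.
 */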
void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, header_length);

        if (!align)
                return;

        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}

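/*
 * Align both the 802.11 header and the payload to a 4-byte boundary,
 * inserting L2 padding between them where needed. This layout is used
 * by devices that set DRIVER_REQUIRE_L2PAD.
 */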
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        unsigned int payload_length = skb->len - header_length;
        unsigned int header_align = ALIGN_SIZE(skb, 0);
        unsigned int payload_align = ALIGN_SIZE(skb, header_length);
        unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

        /*
         * Adjust the header alignment if the payload needs to be moved more
         * than the header.
         */
        if (payload_align > header_align)
                header_align += 4;

        /* There is nothing to do if no alignment is needed */
        if (!header_align)
                return;

        /* Reserve the amount of space needed in front of the frame */
        skb_push(skb, header_align);

        /*
         * Move the header.
         */
        memmove(skb->data, skb->data + header_align, header_length);

        /* Move the payload, if present and if required */
        if (payload_length && payload_align)
                memmove(skb->data + header_length + l2pad,
                        skb->data + header_length + l2pad + payload_align,
                        payload_length);

        /* Trim the skb to the correct size */
        skb_trim(skb, header_length + l2pad + payload_length);
}

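/*
 * Reverse rt2x00queue_insert_l2pad(): pull the header forward over
 * the L2 padding so the frame is contiguous again.
 */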
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        /*
         * L2 padding is only present if the skb contains more than just the
         * IEEE 802.11 header.
         */
        unsigned int l2pad = (skb->len > header_length) ?
                                L2PAD_SIZE(header_length) : 0;

        if (!l2pad)
                return;

        memmove(skb->data + l2pad, skb->data, header_length);
        skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
                                                 struct txentry_desc *txdesc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        unsigned long irqflags;

        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
            unlikely(!tx_info->control.vif))
                return;

        /*
         * Hardware should insert sequence counter.
         * FIXME: We insert a software sequence counter first for
         * hardware that doesn't support hardware sequence counting.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
         *
         * A secondary problem exists for drivers that cannot toggle
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
        spin_lock_irqsave(&intf->seqlock, irqflags);

        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                intf->seqno += 0x10;
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

        spin_unlock_irqrestore(&intf->seqlock, irqflags);

        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
                                                  struct txentry_desc *txdesc,
                                                  const struct rt2x00_rate *hwrate)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
        data_length = entry->skb->len + 4;
        data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        txdesc->signal = hwrate->plcp;
        txdesc->service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                txdesc->length_high = (data_length >> 6) & 0x3f;
                txdesc->length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = GET_DURATION_RES(data_length, hwrate->bitrate);
                duration = GET_DURATION(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->service |= 0x80;
                }

                txdesc->length_high = (duration >> 8) & 0xff;
                txdesc->length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->signal |= 0x08;
        }
}

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                             struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_rate *rate =
            ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
        const struct rt2x00_rate *hwrate;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Header and frame information.
         */
        txdesc->length = entry->skb->len;
        txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is a RTS/CTS frame
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
        if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Check if more frames (!= fragments) are pending
         */
        if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame, except for a frame that has
         * been injected through a monitor interface. The latter is
         * needed for testing a monitor interface.
         */
        if ((ieee80211_is_beacon(hdr->frame_control) ||
             ieee80211_is_probe_resp(hdr->frame_control)) &&
            (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
            !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
                txdesc->ifs = IFS_BACKOFF;
        } else
                txdesc->ifs = IFS_SIFS;

        /*
         * Determine rate modulation.
         */
        hwrate = rt2x00_get_rate(rate->hw_value);
        txdesc->rate_mode = RATE_MODE_CCK;
        if (hwrate->flags & DEV_RATE_OFDM)
                txdesc->rate_mode = RATE_MODE_OFDM;

        /*
         * Apply TX descriptor handling by components
         */
        rt2x00crypto_create_tx_descriptor(entry, txdesc);
        rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
        rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
        rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        /*
         * This should not happen, we already checked the entry
         * was ours. When the hardware disagrees there has been
         * a queue corruption!
         */
        if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
                     rt2x00dev->ops->lib->get_entry_state(entry))) {
                ERROR(rt2x00dev,
                      "Corrupt queue %d, accessing entry which is not ours.\n"
                      "Please file bug report to %s.\n",
                      entry->queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the requested extra tx headroom in front of the skb.
         */
        skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
        memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

        /*
         * Call the driver's write_tx_data function, if it exists.
         */
        if (rt2x00dev->ops->lib->write_tx_data)
                rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

        /*
         * Map the skb to DMA.
         */
        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
                rt2x00queue_map_txskb(entry);

        return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                            struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;

        queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

        /*
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
                                      struct txentry_desc *txdesc)
{
        /*
         * Check if we need to kick the queue, there are however a few rules:
         * 1) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 2) Rule 1 can be broken when the available entries
         *    in the queue are less than a certain threshold.
         */
        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
}

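/*
 * Main TX entry point: claim the current Q_INDEX entry, build the TX
 * descriptor, prepare the frame for the hardware (IV handling,
 * alignment or L2 padding, DMA mapping) and finally kick the queue.
 */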
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               bool local)
{
        struct ieee80211_tx_info *tx_info;
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;
        u8 rate_idx, rate_flags;

        if (unlikely(rt2x00queue_full(queue)))
                return -ENOBUFS;

        if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
                                      &entry->flags))) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        /*
         * All information is retrieved from the skb->cb array,
         * now we should claim ownership of the driver part of that
         * array, preserving the bitrate index and flags.
         */
        tx_info = IEEE80211_SKB_CB(skb);
        rate_idx = tx_info->control.rates[0].idx;
        rate_flags = tx_info->control.rates[0].flags;
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;
        skbdesc->tx_rate_idx = rate_idx;
        skbdesc->tx_rate_flags = rate_flags;

        if (local)
                skbdesc->flags |= SKBDESC_NOT_MAC80211;

        /*
         * When hardware encryption is supported, and this frame
         * is to be encrypted, we should strip the IV/EIV data from
         * the frame so we can provide it to the driver separately.
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
                if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
                else
                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
        }

        /*
         * When DMA allocation is required we should guarantee to the
         * driver that the DMA is aligned to a 4-byte boundary.
         * However some drivers require L2 padding to pad the payload
         * rather than the header. This could be a requirement for
         * PCI and USB devices, while header alignment only is valid
         * for PCI devices.
         */
        if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
                rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
        else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
                rt2x00queue_align_frame(entry->skb);

        /*
         * It is possible that the queue was corrupted and this
         * call failed. Since we always return NETDEV_TX_OK to mac80211,
         * this frame will simply be dropped.
         */
        if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                entry->skb = NULL;
                return -EIO;
        }

        set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
        rt2x00queue_kick_tx_queue(queue, &txdesc);

        return 0;
}

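/*
 * Rebuild and upload the beacon for @vif, or kill beaconing for this
 * interface when @enable_beacon is false.
 */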
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
                              struct ieee80211_vif *vif,
                              const bool enable_beacon)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
        struct txentry_desc txdesc;

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        mutex_lock(&intf->beacon_skb_mutex);

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        if (!enable_beacon) {
                rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
                mutex_unlock(&intf->beacon_skb_mutex);
                return 0;
        }

        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
        if (!intf->beacon->skb) {
                mutex_unlock(&intf->beacon_skb_mutex);
                return -ENOMEM;
        }

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

        /*
         * Fill in skb descriptor
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = intf->beacon;

        /*
         * Send beacon to hardware and enable beacon generation.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

        mutex_unlock(&intf->beacon_skb_mutex);

        return 0;
}

void rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
                                void (*fn)(struct queue_entry *entry))
{
        unsigned long irqflags;
        unsigned int index_start;
        unsigned int index_end;
        unsigned int i;

        if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index range (%d - %d)\n",
                      start, end);
                return;
        }

        /*
         * Only protect the range we are going to loop over,
         * if during our loop an extra entry is set to pending
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
        spin_lock_irqsave(&queue->index_lock, irqflags);
        index_start = queue->index[start];
        index_end = queue->index[end];
        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        /*
         * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
        if (index_start < index_end) {
                for (i = index_start; i < index_end; i++)
                        fn(&queue->entries[i]);
        } else {
                for (i = index_start; i < queue->limit; i++)
                        fn(&queue->entries[i]);

                for (i = 0; i < index_end; i++)
                        fn(&queue->entries[i]);
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
                                         const enum data_queue_qid queue)
{
        int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        if (queue == QID_RX)
                return rt2x00dev->rx;

        if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
                return &rt2x00dev->tx[queue];

        if (!rt2x00dev->bcn)
                return NULL;

        if (queue == QID_BEACON)
                return &rt2x00dev->bcn[0];
        else if (queue == QID_ATIM && atim)
                return &rt2x00dev->bcn[1];

        return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

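/*
 * Advance the given queue index by one, wrapping around at the queue
 * limit, and update the queue length and counters accordingly.
 */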
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        queue->last_action[index] = jiffies;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

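/*
 * Reset all indices and counters of the queue to their initial state.
 */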
static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->count = 0;
        queue->length = 0;

        for (i = 0; i < Q_INDEX_MAX; i++) {
                queue->index[i] = 0;
                queue->last_action[i] = jiffies;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        txall_queue_for_each(rt2x00dev, queue)
                rt2x00dev->ops->lib->kill_tx_queue(queue);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                for (i = 0; i < queue->limit; i++) {
                        rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
                        if (queue->qid == QID_RX)
                                rt2x00queue_index_inc(queue, Q_INDEX);
                }
        }
}

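/*
 * Allocate the entry array for a queue. All entries plus their driver
 * private data are carved out of a single allocation: the priv_data
 * blocks follow the array of struct queue_entry, as computed by the
 * QUEUE_ENTRY_PRIV_OFFSET macro below.
 */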
static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        (((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)))

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                rt2x00queue_free_skb(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
                status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        spin_lock_init(&queue->index_lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->txop = 0;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

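/*
 * Allocate the data_queue array for the device and set up the rx, tx
 * and bcn pointers into it. The queues are laid out back to back:
 * one RX queue, ops->tx_queues TX queues, one beacon queue and, when
 * DRIVER_REQUIRE_ATIM_QUEUE is set, one ATIM queue.
 */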
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_BE + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_BE;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}