rt2x00queue.c
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
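
/*
 * Resulting layout of the RX skb allocated above, a sketch for a
 * hardware-crypto capable device (head_size = 4 + 8, tail_size = 8):
 *
 *	| headroom (12)  | desc + data (frame_size) | tailroom (8) |
 *	  align + IV/EIV                               ICV
 */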
void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
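
/*
 * Example (a sketch): if skb->data sits at an address ending in ...02,
 * ALIGN_SIZE() yields 2, so the frame is moved 2 bytes towards the
 * head and afterwards starts on a 4-byte boundary.
 */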
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
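
/*
 * Worked example (a sketch, assuming the usual L2PAD_SIZE(hdrlen) ==
 * -hdrlen & 3 from rt2x00queue.h): a QoS data frame has a 26 byte
 * header, so L2PAD_SIZE(26) == 2 and two padding bytes are inserted
 * between header and payload so the payload starts 4-byte aligned.
 */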
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);
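
	/*
	 * Note: the sequence number occupies bits 4-15 of seq_ctrl and
	 * the fragment number bits 0-3 (IEEE80211_SCTL_FRAG), so
	 * stepping the counter by 0x10 advances the sequence number by
	 * exactly one while leaving the fragment field untouched.
	 */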
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
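
/*
 * Worked example for the CCK branch above (a sketch, assuming the usual
 * GET_DURATION(size, rate) == size * 8 * 10 / rate with the bitrate in
 * 100 kbit/s units): a 14 byte frame at 11 Mbit/s (bitrate == 110)
 * gives duration = 1120 / 110 = 10 us with residual 20, so duration is
 * rounded up to 11 us and, since residual <= 30, the 802.11b Length
 * Extension bit (0x80) is set in the service field.
 */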
static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;

	if (tx_info->control.sta)
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STA's using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		return -ENOBUFS;
	}

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

	return 0;
}
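
/*
 * Typical caller (a sketch, not verbatim driver code): mac80211's .tx
 * handler looks up the data queue matching the skb's QoS class and
 * hands the frame over, freeing it on failure since mac80211 is always
 * answered with NETDEV_TX_OK:
 *
 *	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
 *		dev_kfree_skb_any(skb);
 */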
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
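
/*
 * Example usage (a sketch with a hypothetical callback): walk every
 * entry between the TX done pointer and the current write pointer and
 * hand pending frames to the device.
 *
 *	static bool example_kick_entry(struct queue_entry *entry, void *data)
 *	{
 *		if (!test_bit(ENTRY_DATA_PENDING, &entry->flags))
 *			return false;	// false == continue iterating
 *		... hand entry to the device ...
 *		return false;
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, example_kick_entry);
 */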
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);
	entry = &queue->entries[queue->index[index]];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
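
/*
 * Relationship between the ring indices maintained above (a sketch):
 *
 *	Q_INDEX      - next entry the host will fill with a new frame
 *	Q_INDEX_DONE - oldest entry still owned by the device
 *	length       - entries currently between Q_INDEX_DONE and Q_INDEX
 *	count        - total number of entries completed so far
 */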
void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		/* fall through */
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
	    (queue->qid == QID_AC_VO) ||
	    (queue->qid == QID_AC_VI) ||
	    (queue->qid == QID_AC_BE) ||
	    (queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
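
/*
 * Memory layout produced by the allocation above (a sketch): a single
 * kcalloc block holds all entry structs followed by all driver private
 * areas, with QUEUE_ENTRY_PRIV_OFFSET pointing entry i at private
 * area i:
 *
 *	| entry 0 | ... | entry limit-1 | priv 0 | ... | priv limit-1 |
 */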
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
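
/*
 * Resulting layout of the queue array (a sketch, with T ==
 * ops->tx_queues):
 *
 *	queue[0]        rx   (QID_RX)
 *	queue[1 .. T]   tx   (QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK)
 *	queue[T + 1]    bcn  (QID_BEACON)
 *	queue[T + 2]    atim (QID_ATIM, only when REQUIRE_ATIM_QUEUE)
 */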
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}