rt2x00queue.c

/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame length,
	 * so the requested bytes remain available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

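/*
 * Insert L2 padding between the IEEE 802.11 header and the payload so
 * that the payload starts on a 4-byte boundary, moving the header and,
 * when needed, the payload within the skb to achieve this.
 */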
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock(&intf->seqlock);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock(&intf->seqlock);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (tx_info->control.sta)
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen; we already checked that the entry
	 * was ours. If the hardware disagrees, there has been
	 * queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

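/*
 * Queue a single frame for transmission: build the TX descriptor, claim
 * the skb->cb area, strip the IV/EIV when the hardware handles the
 * encryption, align or L2-pad the frame, claim a free queue entry and
 * hand the frame to the driver, kicking the queue when needed.
 */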
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

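/*
 * Walk the entries between the 'start' and 'end' indices, following the
 * ring wrap-around when needed, and call fn() on each entry. Iteration
 * stops early (and true is returned) as soon as fn() returns true.
 */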
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

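/*
 * Advance one of the queue indices, wrapping around at queue->limit.
 * Advancing Q_INDEX accounts for a newly queued frame (queue->length++),
 * while advancing Q_INDEX_DONE accounts for a completed frame
 * (queue->length--, queue->count++).
 */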
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but since we are completely shutting down everything
	 * now, it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

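	/*
	 * The allocation above holds all queue_entry structures first,
	 * followed by one driver private data block per entry. The macro
	 * below computes the address of the private block belonging to
	 * entry __index within that single allocation.
	 */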
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);

	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}