rt2x00queue.h

/*
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00
	Abstract: rt2x00 queue datastructures and routines
 */

#ifndef RT2X00QUEUE_H
#define RT2X00QUEUE_H

#include <linux/prefetch.h>

/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the frame size to be a multiple of 128 bytes.
 * For USB devices this restriction does not apply, but the value of
 * 2432 makes sense since it is big enough to contain the maximum fragment
 * size according to the IEEE 802.11 spec.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432
#define MGMT_FRAME_SIZE		256
#define AGGREGATION_SIZE	3840

/**
 * enum data_queue_qid: Queue identification
 *
 * @QID_AC_VO: AC VO queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_VO = 0,
	QID_AC_VI = 1,
	QID_AC_BE = 2,
	QID_AC_BK = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};
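
/*
 * Illustrative sketch (not part of the original header): the QID_AC_* values
 * are chosen to line up with the mac80211 queue mapping, so the qid of an
 * outgoing data frame can normally be derived directly from the skb. The
 * helper name below is hypothetical.
 */
static inline enum data_queue_qid example_skb_qid(struct sk_buff *skb)
{
	return (enum data_queue_qid)skb_get_queue_mapping(skb);
}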

/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
 *	mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	SKBDESC_IV_STRIPPED = 1 << 2,
	SKBDESC_NOT_MAC80211 = 1 << 3,
	SKBDESC_DESC_IN_SKB = 1 << 4,
};

/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, which means that
 * this structure should not exceed the size of that array (40 bytes).
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;
	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};

/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 */
static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}
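
/*
 * Illustrative sketch (not part of the original header): a driver's TX path
 * would typically obtain the per-skb descriptor and record where the
 * hardware descriptor has been written before handing the frame to the
 * device. The helper name and the choice of flags are only an example.
 */
static inline void example_init_skbdesc(struct sk_buff *skb, u8 desc_len)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
	skbdesc->desc = skb->data;
	skbdesc->desc_len = desc_len;
}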

/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
 * @RXDONE_MY_BSS: Does this frame originate from the device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = BIT(0),
	RXDONE_SIGNAL_BITRATE = BIT(1),
	RXDONE_SIGNAL_MCS = BIT(2),
	RXDONE_MY_BSS = BIT(3),
	RXDONE_CRYPTO_IV = BIT(4),
	RXDONE_CRYPTO_ICV = BIT(5),
	RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )
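
/*
 * Illustrative sketch (not part of the original header): RXDONE_SIGNAL_MASK
 * isolates the RXDONE_SIGNAL_* bits of the dev_flags field so rxdone
 * handling can tell how the signal field must be interpreted. The helper
 * and its return values are purely an example.
 */
static inline int example_signal_type(int dev_flags)
{
	switch (dev_flags & RXDONE_SIGNAL_MASK) {
	case RXDONE_SIGNAL_PLCP:
		return 0;	/* signal holds a PLCP value */
	case RXDONE_SIGNAL_BITRATE:
		return 1;	/* signal holds a bitrate value */
	case RXDONE_SIGNAL_MCS:
		return 2;	/* signal holds an MCS index */
	default:
		return -1;	/* no usable signal information */
	}
}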

/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};

/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries
 * @TXDONE_FAILURE: Frame was not successfully sent
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
};

/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};
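
/*
 * Illustrative sketch (not part of the original header): @flags is an
 * unsigned long so the result can be reported with the regular bit
 * operations. The helper and its parameters are only an example.
 */
static inline void example_fill_txdone(struct txdone_entry_desc *txdesc,
				       bool success, bool fallback, int retry)
{
	txdesc->flags = 0;
	__set_bit(success ? TXDONE_SUCCESS : TXDONE_FAILURE, &txdesc->flags);
	if (fallback && retry > 0)
		__set_bit(TXDONE_FALLBACK, &txdesc->flags);
	txdesc->retry = retry;
}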

/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
	ENTRY_TXD_HT_MIMO_PS,
};

/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags (See &enum txentry_desc_flags).
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @length_high: PLCP length high word.
 * @length_low: PLCP length low word.
 * @signal: PLCP signal.
 * @service: PLCP service.
 * @mcs: MCS.
 * @stbc: STBC.
 * @ba_size: BA size.
 * @rate_mode: Rate mode (See &enum rate_modulation).
 * @mpdu_density: MPDU density.
 * @retry_limit: Max number of retries.
 * @ifs: IFS value.
 * @txop: IFS value for 11n capable chips.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	u16 length;
	u16 header_length;

	u16 length_high;
	u16 length_low;
	u16 signal;
	u16 service;

	u16 mcs;
	u16 stbc;
	u16 ba_size;
	u16 rate_mode;
	u16 mpdu_density;

	short retry_limit;
	short ifs;
	short txop;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};
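
/*
 * Illustrative sketch (not part of the original header): since @flags is an
 * unsigned long, the descriptor writing code tests these bits with the
 * standard bit operations. The helper name is hypothetical.
 */
static inline bool example_txd_needs_ack(const struct txentry_desc *txdesc)
{
	return test_bit(ENTRY_TXD_ACK, &txdesc->flags);
}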

/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *	while transferring the data to the hardware. No TX status report will
 *	be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
 *	returned. It is now waiting for the status reporting before the
 *	entry can be reused again.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_DATA_PENDING,
	ENTRY_DATA_IO_FAILED,
	ENTRY_DATA_STATUS_PENDING,
};

/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	void *priv_data;
};
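
/*
 * Illustrative sketch (not part of the original header): before an entry is
 * reused, callers check that the device no longer owns it and that no TX
 * status report is still outstanding. The helper name is hypothetical.
 */
static inline bool example_entry_idle(struct queue_entry *entry)
{
	return !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) &&
	       !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
}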

/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 *	transferred to the hardware.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DMA_DONE,
	Q_INDEX_DONE,
	Q_INDEX_MAX,
};

/**
 * enum data_queue_flags: Status flags for data queues
 *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
 *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
 *	be transmitted and beacon queues will start beaconing the configured
 *	beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *	When this bit is set, the queue has been stopped in mac80211,
 *	preventing new frames from being enqueued. However, a few frames
 *	might still appear shortly after the pausing.
 */
enum data_queue_flags {
	QUEUE_STARTED,
	QUEUE_PAUSED,
};

/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00_dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
 * @status_lock: The mutex for protecting the start/stop/flush
 *	handling on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever the entries in
 *	the @index array need to be changed this lock should be grabbed to
 *	prevent index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @last_action: Timestamp (in jiffies) of the last action performed on each
 *	index, used by the timeout checks below.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;
	unsigned long flags;

	struct mutex status_lock;
	spinlock_t index_lock;

	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned long last_action[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned short desc_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};

/**
 * struct data_queue_desc: Data queue description
 *
 * The information in this structure is used by drivers
 * to inform rt2x00lib about the creation of the data queue.
 *
 * @entry_num: Maximum number of entries for a queue.
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @priv_size: Size of per-queue_entry private data.
 */
struct data_queue_desc {
	unsigned short entry_num;
	unsigned short data_size;
	unsigned short desc_size;
	unsigned short priv_size;
};
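
/*
 * Illustrative sketch (not part of the original header): a driver provides
 * one of these descriptions per queue type; the numbers below are
 * placeholders and do not correspond to any particular chipset.
 */
static const struct data_queue_desc example_queue_tx = {
	.entry_num	= 64,
	.data_size	= DATA_FRAME_SIZE,
	.desc_size	= 32,
	.priv_size	= sizeof(void *),
};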

/**
 * queue_end - Return pointer to the last queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base rx pointer and the maximum number of available queues,
 * this macro will return the address of 1 position beyond the end of the
 * queues array.
 */
#define queue_end(__dev) \
	&(__dev)->rx[(__dev)->data_queues]

/**
 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 * @__dev: Pointer to &struct rt2x00_dev
 *
 * Using the base tx pointer and the maximum number of available TX
 * queues, this macro will return the address of 1 position beyond
 * the end of the TX queue array.
 */
#define tx_queue_end(__dev) \
	&(__dev)->tx[(__dev)->ops->tx_queues]

/**
 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 * @__queue: Current queue for which we need the next queue
 *
 * Using the current queue address we take the address directly
 * after the queue to take the next queue. Note that this macro
 * should be used carefully since it does not protect against
 * moving past the end of the list. (See macros &queue_end and
 * &tx_queue_end for determining the end of the queue).
 */
#define queue_next(__queue) \
	&(__queue)[1]

/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
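
/*
 * Illustrative sketch (not part of the original header): the kind of loop a
 * driver .c file would contain (the macros dereference &struct rt2x00_dev,
 * so the full definition from rt2x00.h must be visible). The helper name is
 * hypothetical.
 */
static inline unsigned int example_count_pending_tx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int pending = 0;

	tx_queue_for_each(rt2x00dev, queue)
		pending += queue->length;

	return pending;
}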

/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to &struct data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start pointer
 * and will walk through the queue until it reaches the @end pointer.
 */
void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry));

/**
 * rt2x00queue_empty - Check if the queue is empty.
 * @queue: Queue to check if empty.
 */
static inline int rt2x00queue_empty(struct data_queue *queue)
{
	return queue->length == 0;
}

/**
 * rt2x00queue_full - Check if the queue is full.
 * @queue: Queue to check if full.
 */
static inline int rt2x00queue_full(struct data_queue *queue)
{
	return queue->length == queue->limit;
}

/**
 * rt2x00queue_available - Check the number of available entries in queue.
 * @queue: Queue to check.
 */
static inline int rt2x00queue_available(struct data_queue *queue)
{
	return queue->limit - queue->length;
}

/**
 * rt2x00queue_threshold - Check if the queue is below threshold
 * @queue: Queue to check.
 */
static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	return rt2x00queue_available(queue) < queue->threshold;
}

/**
 * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
 * @queue: Queue to check.
 */
static inline int rt2x00queue_status_timeout(struct data_queue *queue)
{
	return time_after(queue->last_action[Q_INDEX_DMA_DONE],
			  queue->last_action[Q_INDEX_DONE] + (HZ / 10));
}

/**
 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
 * @queue: Queue to check.
 */
static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
{
	return time_after(queue->last_action[Q_INDEX],
			  queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
}
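
/*
 * Illustrative sketch (not part of the original header): a watchdog would
 * typically combine both timeout checks to decide whether a queue got stuck
 * and needs to be flushed. The helper name is hypothetical.
 */
static inline int example_queue_stuck(struct data_queue *queue)
{
	return rt2x00queue_dma_timeout(queue) ||
	       rt2x00queue_status_timeout(queue);
}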

/**
 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
{
	*value = desc[word];
}

/**
 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index from where the descriptor should be read.
 * @value: Address where the descriptor value should be written into.
 */
static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
{
	__le32 tmp;
	_rt2x00_desc_read(desc, word, &tmp);
	*value = le32_to_cpu(tmp);
}

/**
 * _rt2x00_desc_write - Write a word to the hardware descriptor, without
 * changing the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
{
	desc[word] = value;
}

/**
 * rt2x00_desc_write - Write a word to the hardware descriptor, this
 * function will take care of the byte ordering.
 * @desc: Base descriptor address
 * @word: Word index to where the descriptor should be written.
 * @value: Value that should be written into the descriptor.
 */
static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
{
	_rt2x00_desc_write(desc, word, cpu_to_le32(value));
}
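
/*
 * Illustrative sketch (not part of the original header): a typical
 * read-modify-write of one descriptor word. Word index 0 and the bit that
 * is set are placeholders; real drivers manipulate descriptor words through
 * their register field definitions.
 */
static inline void example_mark_desc_valid(__le32 *desc)
{
	u32 word;

	rt2x00_desc_read(desc, 0, &word);
	word |= BIT(0);		/* placeholder bit, not a real field */
	rt2x00_desc_write(desc, 0, word);
}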

#endif /* RT2X00QUEUE_H */